localrepo: "blindly" do a dirstate backup at the end of the transaction...
marmoute
r50979:5b9c3ae8 default
@@ -1,1762 +1,1778 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12 import stat
13 13 import uuid
14 14
15 15 from .i18n import _
16 16 from .pycompat import delattr
17 17
18 18 from hgdemandimport import tracing
19 19
20 20 from . import (
21 21 dirstatemap,
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 node,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 34 docket as docketmod,
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
43 43 parsers = policy.importmod('parsers')
44 44 rustmod = policy.importrust('dirstate')
45 45
46 46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 47
48 48 propertycache = util.propertycache
49 49 filecache = scmutil.filecache
50 50 _rangemask = dirstatemap.rangemask
51 51
52 52 DirstateItem = dirstatemap.DirstateItem
53 53
54 54
55 55 class repocache(filecache):
56 56 """filecache for files in .hg/"""
57 57
58 58 def join(self, obj, fname):
59 59 return obj._opener.join(fname)
60 60
61 61
62 62 class rootcache(filecache):
63 63 """filecache for files in the repository root"""
64 64
65 65 def join(self, obj, fname):
66 66 return obj._join(fname)
67 67
68 68
69 69 def requires_changing_parents(func):
70 70 def wrap(self, *args, **kwargs):
71 71 if not self.is_changing_parents:
72 72 msg = 'calling `%s` outside of a changing_parents context'
73 73 msg %= func.__name__
74 74 raise error.ProgrammingError(msg)
75 75 if self._invalidated_context:
76 76 msg = 'calling `%s` after the dirstate was invalidated'
77 77 raise error.ProgrammingError(msg)
78 78 return func(self, *args, **kwargs)
79 79
80 80 return wrap
81 81
82 82
83 83 def requires_changing_files(func):
84 84 def wrap(self, *args, **kwargs):
85 85 if not self.is_changing_files:
86 86 msg = 'calling `%s` outside of a `changing_files`'
87 87 msg %= func.__name__
88 88 raise error.ProgrammingError(msg)
89 89 return func(self, *args, **kwargs)
90 90
91 91 return wrap
92 92
93 93
94 94 def requires_not_changing_parents(func):
95 95 def wrap(self, *args, **kwargs):
96 96 if self.is_changing_parents:
97 97 msg = 'calling `%s` inside of a changing_parents context'
98 98 msg %= func.__name__
99 99 raise error.ProgrammingError(msg)
100 100 return func(self, *args, **kwargs)
101 101
102 102 return wrap
103 103
104 104
105 105 CHANGE_TYPE_PARENTS = "parents"
106 106 CHANGE_TYPE_FILES = "files"
107 107
108 108
109 109 @interfaceutil.implementer(intdirstate.idirstate)
110 110 class dirstate:
111 111 def __init__(
112 112 self,
113 113 opener,
114 114 ui,
115 115 root,
116 116 validate,
117 117 sparsematchfn,
118 118 nodeconstants,
119 119 use_dirstate_v2,
120 120 use_tracked_hint=False,
121 121 ):
122 122 """Create a new dirstate object.
123 123
124 124 opener is an open()-like callable that can be used to open the
125 125 dirstate file; root is the root of the directory tracked by
126 126 the dirstate.
127 127 """
128 128 self._use_dirstate_v2 = use_dirstate_v2
129 129 self._use_tracked_hint = use_tracked_hint
130 130 self._nodeconstants = nodeconstants
131 131 self._opener = opener
132 132 self._validate = validate
133 133 self._root = root
134 134 # Either build a sparse-matcher or None if sparse is disabled
135 135 self._sparsematchfn = sparsematchfn
136 136 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
137 137 # a UNC path pointing to a root share (issue4557)
138 138 self._rootdir = pathutil.normasprefix(root)
139 139 # True if any internal state may be different
140 140 self._dirty = False
141 141 # True if the set of tracked files may be different
142 142 self._dirty_tracked_set = False
143 143 self._ui = ui
144 144 self._filecache = {}
145 145 # nesting level of `changing_parents` context
146 146 self._changing_level = 0
147 147 # the change currently underway
148 148 self._change_type = None
149 149 # True if the current dirstate changing operations have been
150 150 # invalidated (used to make sure all nested contexts have been exited)
151 151 self._invalidated_context = False
152 152 self._filename = b'dirstate'
153 153 self._filename_th = b'dirstate-tracked-hint'
154 154 self._pendingfilename = b'%s.pending' % self._filename
155 155 self._plchangecallbacks = {}
156 156 self._origpl = None
157 157 self._mapcls = dirstatemap.dirstatemap
158 158 # Access and cache cwd early, so we don't access it for the first time
159 159 # after a working-copy update caused it to not exist (accessing it then
160 160 # raises an exception).
161 161 self._cwd
162 162
163 163 def prefetch_parents(self):
164 164 """make sure the parents are loaded
165 165
166 166 Used to avoid a race condition.
167 167 """
168 168 self._pl
169 169
170 170 @contextlib.contextmanager
171 171 def _changing(self, repo, change_type):
172 172 if repo.currentwlock() is None:
173 173 msg = b"trying to change the dirstate without holding the wlock"
174 174 raise error.ProgrammingError(msg)
175 175 if self._invalidated_context:
176 176 msg = "trying to use an invalidated dirstate before it has reset"
177 177 raise error.ProgrammingError(msg)
178 178
179 179 has_tr = repo.currenttransaction() is not None
180 180
181 181 # different types of change are mutually exclusive
182 182 if self._change_type is None:
183 183 assert self._changing_level == 0
184 184 self._change_type = change_type
185 185 elif self._change_type != change_type:
186 186 msg = (
187 187 'trying to open "%s" dirstate-changing context while a "%s" is'
188 188 ' already open'
189 189 )
190 190 msg %= (change_type, self._change_type)
191 191 raise error.ProgrammingError(msg)
192 192 self._changing_level += 1
193 193 try:
194 194 yield
195 195 except Exception:
196 196 self.invalidate()
197 197 raise
198 198 finally:
199 199 tr = repo.currenttransaction()
200 200 if self._changing_level > 0:
201 201 if self._invalidated_context:
202 202 # make sure we invalidate anything an upper context might
203 203 # have changed.
204 204 self.invalidate()
205 205 self._changing_level -= 1
206 206 # The invalidation is complete once we exit the final context
207 207 # manager
208 208 if self._changing_level <= 0:
209 209 self._change_type = None
210 210 assert self._changing_level == 0
211 211 if self._invalidated_context:
212 212 self._invalidated_context = False
213 213 else:
214 214 # When an exception occurred, `_invalidated_context`
215 215 # would have been set to True by the `invalidate`
216 216 # call earlier.
217 217 #
218 218 # The code is not more straightforward because the
219 219 # exception catching (and the associated `invalidate`
220 220 # call) might have happened in a nested context
221 221 # instead of the top-level one.
222 222 self.write(tr)
223 223 if has_tr != (tr is not None):
224 224 if has_tr:
225 225 m = "transaction vanished while changing dirstate"
226 226 else:
227 227 m = "transaction appeared while changing dirstate"
228 228 raise error.ProgrammingError(m)
229 229
230 230 @contextlib.contextmanager
231 231 def changing_parents(self, repo):
232 232 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
233 233 yield c
234 234
235 235 @contextlib.contextmanager
236 236 def changing_files(self, repo):
237 237 with self._changing(repo, CHANGE_TYPE_FILES) as c:
238 238 yield c
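    # A minimal usage sketch (names like `repo`, `new_p1`, and the file
    # paths are placeholders; real callers hold the wlock and pick the
    # update_file arguments that match the operation at hand):
    #
    #     with repo.wlock():
    #         with repo.dirstate.changing_parents(repo):
    #             repo.dirstate.setparents(new_p1)
    #             repo.dirstate.update_file(
    #                 b'foo', wc_tracked=True, p1_tracked=True
    #             )
    #
    #     with repo.wlock():
    #         with repo.dirstate.changing_files(repo):
    #             repo.dirstate.set_tracked(b'bar')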
239 239
240 240 # here to help migration to the new code
241 241 def parentchange(self):
242 242 msg = (
243 243 "Mercurial 6.4 and later requires call to "
244 244 "`dirstate.changing_parents(repo)`"
245 245 )
246 246 raise error.ProgrammingError(msg)
247 247
248 248 @property
249 249 def is_changing_any(self):
250 250 """Returns true if the dirstate is in the middle of a set of changes.
251 251
252 252 This returns True for any kind of change.
253 253 """
254 254 return self._changing_level > 0
255 255
256 256 def pendingparentchange(self):
257 257 return self.is_changing_parent()
258 258
259 259 def is_changing_parent(self):
260 260 """Returns true if the dirstate is in the middle of a set of changes
261 261 that modify the dirstate parent.
262 262 """
263 263 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
264 264 return self.is_changing_parents
265 265
266 266 @property
267 267 def is_changing_parents(self):
268 268 """Returns true if the dirstate is in the middle of a set of changes
269 269 that modify the dirstate parent.
270 270 """
271 271 if self._changing_level <= 0:
272 272 return False
273 273 return self._change_type == CHANGE_TYPE_PARENTS
274 274
275 275 @property
276 276 def is_changing_files(self):
277 277 """Returns true if the dirstate is in the middle of a set of changes
278 278 that modify the files tracked or their sources.
279 279 """
280 280 if self._changing_level <= 0:
281 281 return False
282 282 return self._change_type == CHANGE_TYPE_FILES
283 283
284 284 @propertycache
285 285 def _map(self):
286 286 """Return the dirstate contents (see documentation for dirstatemap)."""
287 287 self._map = self._mapcls(
288 288 self._ui,
289 289 self._opener,
290 290 self._root,
291 291 self._nodeconstants,
292 292 self._use_dirstate_v2,
293 293 )
294 294 return self._map
295 295
296 296 @property
297 297 def _sparsematcher(self):
298 298 """The matcher for the sparse checkout.
299 299
300 300 The working directory may not include every file from a manifest. The
301 301 matcher obtained by this property will match a path if it is to be
302 302 included in the working directory.
303 303
304 304 When sparse is disabled, return None.
305 305 """
306 306 if self._sparsematchfn is None:
307 307 return None
308 308 # TODO there is potential to cache this property. For now, the matcher
309 309 # is resolved on every access. (But the called function does use a
310 310 # cache to keep the lookup fast.)
311 311 return self._sparsematchfn()
312 312
313 313 @repocache(b'branch')
314 314 def _branch(self):
315 315 try:
316 316 return self._opener.read(b"branch").strip() or b"default"
317 317 except FileNotFoundError:
318 318 return b"default"
319 319
320 320 @property
321 321 def _pl(self):
322 322 return self._map.parents()
323 323
324 324 def hasdir(self, d):
325 325 return self._map.hastrackeddir(d)
326 326
327 327 @rootcache(b'.hgignore')
328 328 def _ignore(self):
329 329 files = self._ignorefiles()
330 330 if not files:
331 331 return matchmod.never()
332 332
333 333 pats = [b'include:%s' % f for f in files]
334 334 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
335 335
336 336 @propertycache
337 337 def _slash(self):
338 338 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
339 339
340 340 @propertycache
341 341 def _checklink(self):
342 342 return util.checklink(self._root)
343 343
344 344 @propertycache
345 345 def _checkexec(self):
346 346 return bool(util.checkexec(self._root))
347 347
348 348 @propertycache
349 349 def _checkcase(self):
350 350 return not util.fscasesensitive(self._join(b'.hg'))
351 351
352 352 def _join(self, f):
353 353 # much faster than os.path.join()
354 354 # it's safe because f is always a relative path
355 355 return self._rootdir + f
356 356
357 357 def flagfunc(self, buildfallback):
358 358 """build a callable that returns flags associated with a filename
359 359
360 360 The information is extracted from three possible layers:
361 361 1. the file system if it supports the information
362 362 2. the "fallback" information stored in the dirstate if any
363 363 3. a more expensive mechanism inferring the flags from the parents.
364 364 """
365 365
366 366 # small hack to cache the result of buildfallback()
367 367 fallback_func = []
368 368
369 369 def get_flags(x):
370 370 entry = None
371 371 fallback_value = None
372 372 try:
373 373 st = os.lstat(self._join(x))
374 374 except OSError:
375 375 return b''
376 376
377 377 if self._checklink:
378 378 if util.statislink(st):
379 379 return b'l'
380 380 else:
381 381 entry = self.get_entry(x)
382 382 if entry.has_fallback_symlink:
383 383 if entry.fallback_symlink:
384 384 return b'l'
385 385 else:
386 386 if not fallback_func:
387 387 fallback_func.append(buildfallback())
388 388 fallback_value = fallback_func[0](x)
389 389 if b'l' in fallback_value:
390 390 return b'l'
391 391
392 392 if self._checkexec:
393 393 if util.statisexec(st):
394 394 return b'x'
395 395 else:
396 396 if entry is None:
397 397 entry = self.get_entry(x)
398 398 if entry.has_fallback_exec:
399 399 if entry.fallback_exec:
400 400 return b'x'
401 401 else:
402 402 if fallback_value is None:
403 403 if not fallback_func:
404 404 fallback_func.append(buildfallback())
405 405 fallback_value = fallback_func[0](x)
406 406 if b'x' in fallback_value:
407 407 return b'x'
408 408 return b''
409 409
410 410 return get_flags
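    # Illustrative sketch of how the returned callable behaves (paths are
    # placeholders; `buildfallback` is the caller-provided fallback factory):
    #
    #     get_flags = dirstate.flagfunc(buildfallback)
    #     get_flags(b'bin/script')   # b'x' when the file is executable
    #     get_flags(b'link-to-foo')  # b'l' when the file is a symlink
    #     get_flags(b'plain.txt')    # b'' otherwise (or if lstat fails)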
411 411
412 412 @propertycache
413 413 def _cwd(self):
414 414 # internal config: ui.forcecwd
415 415 forcecwd = self._ui.config(b'ui', b'forcecwd')
416 416 if forcecwd:
417 417 return forcecwd
418 418 return encoding.getcwd()
419 419
420 420 def getcwd(self):
421 421 """Return the path from which a canonical path is calculated.
422 422
423 423 This path should be used to resolve file patterns or to convert
424 424 canonical paths back to file paths for display. It shouldn't be
425 425 used to get real file paths. Use vfs functions instead.
426 426 """
427 427 cwd = self._cwd
428 428 if cwd == self._root:
429 429 return b''
430 430 # self._root ends with a path separator if self._root is '/' or 'C:\'
431 431 rootsep = self._root
432 432 if not util.endswithsep(rootsep):
433 433 rootsep += pycompat.ossep
434 434 if cwd.startswith(rootsep):
435 435 return cwd[len(rootsep) :]
436 436 else:
437 437 # we're outside the repo. return an absolute path.
438 438 return cwd
439 439
440 440 def pathto(self, f, cwd=None):
441 441 if cwd is None:
442 442 cwd = self.getcwd()
443 443 path = util.pathto(self._root, cwd, f)
444 444 if self._slash:
445 445 return util.pconvert(path)
446 446 return path
447 447
448 448 def get_entry(self, path):
449 449 """return a DirstateItem for the associated path"""
450 450 entry = self._map.get(path)
451 451 if entry is None:
452 452 return DirstateItem()
453 453 return entry
454 454
455 455 def __contains__(self, key):
456 456 return key in self._map
457 457
458 458 def __iter__(self):
459 459 return iter(sorted(self._map))
460 460
461 461 def items(self):
462 462 return self._map.items()
463 463
464 464 iteritems = items
465 465
466 466 def parents(self):
467 467 return [self._validate(p) for p in self._pl]
468 468
469 469 def p1(self):
470 470 return self._validate(self._pl[0])
471 471
472 472 def p2(self):
473 473 return self._validate(self._pl[1])
474 474
475 475 @property
476 476 def in_merge(self):
477 477 """True if a merge is in progress"""
478 478 return self._pl[1] != self._nodeconstants.nullid
479 479
480 480 def branch(self):
481 481 return encoding.tolocal(self._branch)
482 482
483 483 # XXX since this makes the dirstate dirty, we should enforce that it is done
484 484 # within an appropriate change-context that scopes the change and ensures it
485 485 # eventually gets written on disk (or rolled back)
486 486 def setparents(self, p1, p2=None):
487 487 """Set dirstate parents to p1 and p2.
488 488
489 489 When moving from two parents to one, "merged" entries are
490 490 adjusted to normal, and previous copy records are discarded and
491 491 returned by the call.
492 492
493 493 See localrepo.setparents()
494 494 """
495 495 if p2 is None:
496 496 p2 = self._nodeconstants.nullid
497 497 if self._changing_level == 0:
498 498 raise ValueError(
499 499 b"cannot set dirstate parent outside of "
500 500 b"dirstate.changing_parents context manager"
501 501 )
502 502
503 503 self._dirty = True
504 504 oldp2 = self._pl[1]
505 505 if self._origpl is None:
506 506 self._origpl = self._pl
507 507 nullid = self._nodeconstants.nullid
508 508 # True if we need to fold p2 related state back to a linear case
509 509 fold_p2 = oldp2 != nullid and p2 == nullid
510 510 return self._map.setparents(p1, p2, fold_p2=fold_p2)
511 511
512 512 def setbranch(self, branch):
513 513 self.__class__._branch.set(self, encoding.fromlocal(branch))
514 514 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
515 515 try:
516 516 f.write(self._branch + b'\n')
517 517 f.close()
518 518
519 519 # make sure filecache has the correct stat info for _branch after
520 520 # replacing the underlying file
521 521 ce = self._filecache[b'_branch']
522 522 if ce:
523 523 ce.refresh()
524 524 except: # re-raises
525 525 f.discard()
526 526 raise
527 527
528 528 def invalidate(self):
529 529 """Causes the next access to reread the dirstate.
530 530
531 531 This is different from localrepo.invalidatedirstate() because it always
532 532 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
533 533 check whether the dirstate has changed before rereading it."""
534 534
535 535 for a in ("_map", "_branch", "_ignore"):
536 536 if a in self.__dict__:
537 537 delattr(self, a)
538 538 self._dirty = False
539 539 self._dirty_tracked_set = False
540 540 self._invalidated_context = self._changing_level > 0
541 541 self._origpl = None
542 542
543 543 # XXX since this makes the dirstate dirty, we should enforce that it is done
544 544 # within an appropriate change-context that scopes the change and ensures it
545 545 # eventually gets written on disk (or rolled back)
546 546 def copy(self, source, dest):
547 547 """Mark dest as a copy of source. Unmark dest if source is None."""
548 548 if source == dest:
549 549 return
550 550 self._dirty = True
551 551 if source is not None:
552 552 self._check_sparse(source)
553 553 self._map.copymap[dest] = source
554 554 else:
555 555 self._map.copymap.pop(dest, None)
556 556
557 557 def copied(self, file):
558 558 return self._map.copymap.get(file, None)
559 559
560 560 def copies(self):
561 561 return self._map.copymap
562 562
563 563 @requires_changing_files
564 564 def set_tracked(self, filename, reset_copy=False):
565 565 """a "public" method for generic code to mark a file as tracked
566 566
567 567 This function is to be called outside of the "update/merge" case. For
568 568 example by a command like `hg add X`.
569 569
570 570 if reset_copy is set, any existing copy information will be dropped.
571 571
572 572 return True if the file was previously untracked, False otherwise.
573 573 """
574 574 self._dirty = True
575 575 entry = self._map.get(filename)
576 576 if entry is None or not entry.tracked:
577 577 self._check_new_tracked_filename(filename)
578 578 pre_tracked = self._map.set_tracked(filename)
579 579 if reset_copy:
580 580 self._map.copymap.pop(filename, None)
581 581 if pre_tracked:
582 582 self._dirty_tracked_set = True
583 583 return pre_tracked
584 584
585 585 @requires_changing_files
586 586 def set_untracked(self, filename):
587 587 """a "public" method for generic code to mark a file as untracked
588 588
589 589 This function is to be called outside of the "update/merge" case. For
590 590 example by a command like `hg remove X`.
591 591
592 592 return True if the file was previously tracked, False otherwise.
593 593 """
594 594 ret = self._map.set_untracked(filename)
595 595 if ret:
596 596 self._dirty = True
597 597 self._dirty_tracked_set = True
598 598 return ret
599 599
600 600 @requires_not_changing_parents
601 601 def set_clean(self, filename, parentfiledata):
602 602 """record that the current state of the file on disk is known to be clean"""
603 603 self._dirty = True
604 604 if not self._map[filename].tracked:
605 605 self._check_new_tracked_filename(filename)
606 606 (mode, size, mtime) = parentfiledata
607 607 self._map.set_clean(filename, mode, size, mtime)
608 608
609 609 @requires_not_changing_parents
610 610 def set_possibly_dirty(self, filename):
611 611 """record that the current state of the file on disk is unknown"""
612 612 self._dirty = True
613 613 self._map.set_possibly_dirty(filename)
614 614
615 615 @requires_changing_parents
616 616 def update_file_p1(
617 617 self,
618 618 filename,
619 619 p1_tracked,
620 620 ):
621 621 """Set a file as tracked in the parent (or not)
622 622
623 623 This is to be called when adjusting the dirstate to a new parent after a
624 624 history-rewriting operation.
625 625
626 626 It should not be called during a merge (p2 != nullid) and only within
627 627 a `with dirstate.changing_parents(repo):` context.
628 628 """
629 629 if self.in_merge:
630 630 msg = b'update_file_reference should not be called when merging'
631 631 raise error.ProgrammingError(msg)
632 632 entry = self._map.get(filename)
633 633 if entry is None:
634 634 wc_tracked = False
635 635 else:
636 636 wc_tracked = entry.tracked
637 637 if not (p1_tracked or wc_tracked):
638 638 # the file is no longer relevant to anyone
639 639 if self._map.get(filename) is not None:
640 640 self._map.reset_state(filename)
641 641 self._dirty = True
642 642 elif (not p1_tracked) and wc_tracked:
643 643 if entry is not None and entry.added:
644 644 return # avoid dropping copy information (maybe?)
645 645
646 646 self._map.reset_state(
647 647 filename,
648 648 wc_tracked,
649 649 p1_tracked,
650 650 # the underlying reference might have changed, we will have to
651 651 # check it.
652 652 has_meaningful_mtime=False,
653 653 )
654 654
655 655 @requires_changing_parents
656 656 def update_file(
657 657 self,
658 658 filename,
659 659 wc_tracked,
660 660 p1_tracked,
661 661 p2_info=False,
662 662 possibly_dirty=False,
663 663 parentfiledata=None,
664 664 ):
665 665 """update the information about a file in the dirstate
666 666
667 667 This is to be called when the dirstate's parents change, to keep track
668 668 of the file's situation with regard to the working copy and its parent.
669 669
670 670 This function must be called within a `dirstate.changing_parents` context.
671 671
672 672 note: the API is at an early stage and we might need to adjust it
673 673 depending on what information ends up being relevant and useful to
674 674 other processing.
675 675 """
676 676 self._update_file(
677 677 filename=filename,
678 678 wc_tracked=wc_tracked,
679 679 p1_tracked=p1_tracked,
680 680 p2_info=p2_info,
681 681 possibly_dirty=possibly_dirty,
682 682 parentfiledata=parentfiledata,
683 683 )
684 684
685 685 # XXX since this makes the dirstate dirty, we should enforce that it is done
686 686 # within an appropriate change-context that scopes the change and ensures it
687 687 # eventually gets written on disk (or rolled back)
688 688 def hacky_extension_update_file(self, *args, **kwargs):
689 689 """NEVER USE THIS, YOU DO NOT NEED IT
690 690
691 691 This function is a variant of "update_file" to be called by a small set
692 692 of extensions; it also adjusts the internal state of the file, but can be
693 693 called outside a `changing_parents` context.
694 694
695 695 A very small number of extensions meddle with the working copy content
696 696 in a way that requires adjusting the dirstate accordingly. At the time
697 697 this function was written they are:
698 698 - keyword,
699 699 - largefile,
700 700 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
701 701
702 702 This function could probably be replaced by a more semantic one (like
703 703 "adjust expected size" or "always revalidate file content", etc.);
704 704 however, at the time this was written, that was too much of a detour
705 705 to be considered.
706 706 """
707 707 self._update_file(
708 708 *args,
709 709 **kwargs,
710 710 )
711 711
712 712 def _update_file(
713 713 self,
714 714 filename,
715 715 wc_tracked,
716 716 p1_tracked,
717 717 p2_info=False,
718 718 possibly_dirty=False,
719 719 parentfiledata=None,
720 720 ):
721 721
722 722 # note: I do not think we need to double-check name clashes here since we
723 723 # are in an update/merge case that should already have taken care of
724 724 # this. The test suite agrees
725 725
726 726 self._dirty = True
727 727 old_entry = self._map.get(filename)
728 728 if old_entry is None:
729 729 prev_tracked = False
730 730 else:
731 731 prev_tracked = old_entry.tracked
732 732 if prev_tracked != wc_tracked:
733 733 self._dirty_tracked_set = True
734 734
735 735 self._map.reset_state(
736 736 filename,
737 737 wc_tracked,
738 738 p1_tracked,
739 739 p2_info=p2_info,
740 740 has_meaningful_mtime=not possibly_dirty,
741 741 parentfiledata=parentfiledata,
742 742 )
743 743
744 744 def _check_new_tracked_filename(self, filename):
745 745 scmutil.checkfilename(filename)
746 746 if self._map.hastrackeddir(filename):
747 747 msg = _(b'directory %r already in dirstate')
748 748 msg %= pycompat.bytestr(filename)
749 749 raise error.Abort(msg)
750 750 # shadows
751 751 for d in pathutil.finddirs(filename):
752 752 if self._map.hastrackeddir(d):
753 753 break
754 754 entry = self._map.get(d)
755 755 if entry is not None and not entry.removed:
756 756 msg = _(b'file %r in dirstate clashes with %r')
757 757 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
758 758 raise error.Abort(msg)
759 759 self._check_sparse(filename)
760 760
761 761 def _check_sparse(self, filename):
762 762 """Check that a filename is inside the sparse profile"""
763 763 sparsematch = self._sparsematcher
764 764 if sparsematch is not None and not sparsematch.always():
765 765 if not sparsematch(filename):
766 766 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
767 767 hint = _(
768 768 b'include file with `hg debugsparse --include <pattern>` or use '
769 769 b'`hg add -s <file>` to include file directory while adding'
770 770 )
771 771 raise error.Abort(msg % filename, hint=hint)
772 772
773 773 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
774 774 if exists is None:
775 775 exists = os.path.lexists(os.path.join(self._root, path))
776 776 if not exists:
777 777 # Maybe a path component exists
778 778 if not ignoremissing and b'/' in path:
779 779 d, f = path.rsplit(b'/', 1)
780 780 d = self._normalize(d, False, ignoremissing, None)
781 781 folded = d + b"/" + f
782 782 else:
783 783 # No path components, preserve original case
784 784 folded = path
785 785 else:
786 786 # recursively normalize leading directory components
787 787 # against dirstate
788 788 if b'/' in normed:
789 789 d, f = normed.rsplit(b'/', 1)
790 790 d = self._normalize(d, False, ignoremissing, True)
791 791 r = self._root + b"/" + d
792 792 folded = d + b"/" + util.fspath(f, r)
793 793 else:
794 794 folded = util.fspath(normed, self._root)
795 795 storemap[normed] = folded
796 796
797 797 return folded
798 798
799 799 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
800 800 normed = util.normcase(path)
801 801 folded = self._map.filefoldmap.get(normed, None)
802 802 if folded is None:
803 803 if isknown:
804 804 folded = path
805 805 else:
806 806 folded = self._discoverpath(
807 807 path, normed, ignoremissing, exists, self._map.filefoldmap
808 808 )
809 809 return folded
810 810
811 811 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
812 812 normed = util.normcase(path)
813 813 folded = self._map.filefoldmap.get(normed, None)
814 814 if folded is None:
815 815 folded = self._map.dirfoldmap.get(normed, None)
816 816 if folded is None:
817 817 if isknown:
818 818 folded = path
819 819 else:
820 820 # store discovered result in dirfoldmap so that future
821 821 # normalizefile calls don't start matching directories
822 822 folded = self._discoverpath(
823 823 path, normed, ignoremissing, exists, self._map.dirfoldmap
824 824 )
825 825 return folded
826 826
827 827 def normalize(self, path, isknown=False, ignoremissing=False):
828 828 """
829 829 normalize the case of a pathname when on a casefolding filesystem
830 830
831 831 isknown specifies whether the filename came from walking the
832 832 disk, to avoid extra filesystem access.
833 833
834 834 If ignoremissing is True, missing paths are returned
835 835 unchanged. Otherwise, we try harder to normalize possibly
836 836 existing path components.
837 837
838 838 The normalized case is determined based on the following precedence:
839 839
840 840 - version of name already stored in the dirstate
841 841 - version of name stored on disk
842 842 - version provided via command arguments
843 843 """
844 844
845 845 if self._checkcase:
846 846 return self._normalize(path, isknown, ignoremissing)
847 847 return path
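    # Hypothetical example, only meaningful when self._checkcase is True
    # (case-folding filesystem): if the dirstate already tracks b'Foo.txt',
    #
    #     dirstate.normalize(b'FOO.TXT')   # -> b'Foo.txt' (dirstate wins)
    #     dirstate.normalize(b'New.TXT')   # -> case as found on disk, or the
    #                                      #    argument itself if missing and
    #                                      #    ignoremissing is True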
848 848
849 849 # XXX since this makes the dirstate dirty, we should enforce that it is done
850 850 # within an appropriate change-context that scopes the change and ensures it
851 851 # eventually gets written on disk (or rolled back)
852 852 def clear(self):
853 853 self._map.clear()
854 854 self._dirty = True
855 855
856 856 # XXX since this makes the dirstate dirty, we should enforce that it is done
857 857 # within an appropriate change-context that scopes the change and ensures it
858 858 # eventually gets written on disk (or rolled back)
859 859 def rebuild(self, parent, allfiles, changedfiles=None):
860 860 matcher = self._sparsematcher
861 861 if matcher is not None and not matcher.always():
862 862 # should not add non-matching files
863 863 allfiles = [f for f in allfiles if matcher(f)]
864 864 if changedfiles:
865 865 changedfiles = [f for f in changedfiles if matcher(f)]
866 866
867 867 if changedfiles is not None:
868 868 # these files will be deleted from the dirstate when they are
869 869 # not found to be in allfiles
870 870 dirstatefilestoremove = {f for f in self if not matcher(f)}
871 871 changedfiles = dirstatefilestoremove.union(changedfiles)
872 872
873 873 if changedfiles is None:
874 874 # Rebuild entire dirstate
875 875 to_lookup = allfiles
876 876 to_drop = []
877 877 self.clear()
878 878 elif len(changedfiles) < 10:
879 879 # Avoid turning allfiles into a set, which can be expensive if it's
880 880 # large.
881 881 to_lookup = []
882 882 to_drop = []
883 883 for f in changedfiles:
884 884 if f in allfiles:
885 885 to_lookup.append(f)
886 886 else:
887 887 to_drop.append(f)
888 888 else:
889 889 changedfilesset = set(changedfiles)
890 890 to_lookup = changedfilesset & set(allfiles)
891 891 to_drop = changedfilesset - to_lookup
892 892
893 893 if self._origpl is None:
894 894 self._origpl = self._pl
895 895 self._map.setparents(parent, self._nodeconstants.nullid)
896 896
897 897 for f in to_lookup:
898 898 if self.in_merge:
899 899 self.set_tracked(f)
900 900 else:
901 901 self._map.reset_state(
902 902 f,
903 903 wc_tracked=True,
904 904 p1_tracked=True,
905 905 )
906 906 for f in to_drop:
907 907 self._map.reset_state(f)
908 908
909 909 self._dirty = True
910 910
911 911 def identity(self):
912 912 """Return identity of dirstate itself to detect changing in storage
913 913
914 914 If identity of previous dirstate is equal to this, writing
915 915 changes based on the former dirstate out can keep consistency.
916 916 """
917 917 return self._map.identity
918 918
919 919 def write(self, tr):
920 920 if not self._dirty:
921 921 return
922 922
923 923 write_key = self._use_tracked_hint and self._dirty_tracked_set
924 924 if tr:
925 925 # make sure we invalidate the current change on abort
926 926 if tr is not None:
927 927 tr.addabort(
928 928 b'dirstate-invalidate',
929 929 lambda tr: self.invalidate(),
930 930 )
931 931 # delay writing in-memory changes out
932 932 tr.addfilegenerator(
933 933 b'dirstate-1-main',
934 934 (self._filename,),
935 935 lambda f: self._writedirstate(tr, f),
936 936 location=b'plain',
937 937 post_finalize=True,
938 938 )
939 939 if write_key:
940 940 tr.addfilegenerator(
941 941 b'dirstate-2-key-post',
942 942 (self._filename_th,),
943 943 lambda f: self._write_tracked_hint(tr, f),
944 944 location=b'plain',
945 945 post_finalize=True,
946 946 )
947 947 return
948 948
949 949 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
950 950 with file(self._filename) as f:
951 951 self._writedirstate(tr, f)
952 952 if write_key:
953 953 # we update the key-file after writing to make sure readers have a
954 954 # key that matches the newly written content
955 955 with file(self._filename_th) as f:
956 956 self._write_tracked_hint(tr, f)
957 957
958 958 def delete_tracked_hint(self):
959 959 """remove the tracked_hint file
960 960
961 961 To be used by format downgrade operations"""
962 962 self._opener.unlink(self._filename_th)
963 963 self._use_tracked_hint = False
964 964
965 965 def addparentchangecallback(self, category, callback):
966 966 """add a callback to be called when the wd parents are changed
967 967
968 968 Callback will be called with the following arguments:
969 969 dirstate, (oldp1, oldp2), (newp1, newp2)
970 970
971 971 Category is a unique identifier to allow overwriting an old callback
972 972 with a newer callback.
973 973 """
974 974 self._plchangecallbacks[category] = callback
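    # Sketch of a registration (the category name and callback are
    # hypothetical; the callback receives the dirstate plus old and new
    # parent pairs, as documented above):
    #
    #     def on_parents_change(dirstate, old_parents, new_parents):
    #         oldp1, oldp2 = old_parents
    #         newp1, newp2 = new_parents
    #         ...  # react to the working directory parents moving
    #
    #     repo.dirstate.addparentchangecallback(b'my-ext', on_parents_change)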
975 975
976 976 def _writedirstate(self, tr, st):
977 977 # notify callbacks about parents change
978 978 if self._origpl is not None and self._origpl != self._pl:
979 979 for c, callback in sorted(self._plchangecallbacks.items()):
980 980 callback(self, self._origpl, self._pl)
981 981 self._origpl = None
982 982 self._map.write(tr, st)
983 983 self._dirty = False
984 984 self._dirty_tracked_set = False
985 985
986 986 def _write_tracked_hint(self, tr, f):
987 987 key = node.hex(uuid.uuid4().bytes)
988 988 f.write(b"1\n%s\n" % key) # 1 is the format version
989 989
990 990 def _dirignore(self, f):
991 991 if self._ignore(f):
992 992 return True
993 993 for p in pathutil.finddirs(f):
994 994 if self._ignore(p):
995 995 return True
996 996 return False
997 997
998 998 def _ignorefiles(self):
999 999 files = []
1000 1000 if os.path.exists(self._join(b'.hgignore')):
1001 1001 files.append(self._join(b'.hgignore'))
1002 1002 for name, path in self._ui.configitems(b"ui"):
1003 1003 if name == b'ignore' or name.startswith(b'ignore.'):
1004 1004 # we need to use os.path.join here rather than self._join
1005 1005 # because path is arbitrary and user-specified
1006 1006 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1007 1007 return files
1008 1008
1009 1009 def _ignorefileandline(self, f):
1010 1010 files = collections.deque(self._ignorefiles())
1011 1011 visited = set()
1012 1012 while files:
1013 1013 i = files.popleft()
1014 1014 patterns = matchmod.readpatternfile(
1015 1015 i, self._ui.warn, sourceinfo=True
1016 1016 )
1017 1017 for pattern, lineno, line in patterns:
1018 1018 kind, p = matchmod._patsplit(pattern, b'glob')
1019 1019 if kind == b"subinclude":
1020 1020 if p not in visited:
1021 1021 files.append(p)
1022 1022 continue
1023 1023 m = matchmod.match(
1024 1024 self._root, b'', [], [pattern], warn=self._ui.warn
1025 1025 )
1026 1026 if m(f):
1027 1027 return (i, lineno, line)
1028 1028 visited.add(i)
1029 1029 return (None, -1, b"")
1030 1030
1031 1031 def _walkexplicit(self, match, subrepos):
1032 1032 """Get stat data about the files explicitly specified by match.
1033 1033
1034 1034 Return a triple (results, dirsfound, dirsnotfound).
1035 1035 - results is a mapping from filename to stat result. It also contains
1036 1036 listings mapping subrepos and .hg to None.
1037 1037 - dirsfound is a list of files found to be directories.
1038 1038 - dirsnotfound is a list of files that the dirstate thinks are
1039 1039 directories and that were not found."""
1040 1040
1041 1041 def badtype(mode):
1042 1042 kind = _(b'unknown')
1043 1043 if stat.S_ISCHR(mode):
1044 1044 kind = _(b'character device')
1045 1045 elif stat.S_ISBLK(mode):
1046 1046 kind = _(b'block device')
1047 1047 elif stat.S_ISFIFO(mode):
1048 1048 kind = _(b'fifo')
1049 1049 elif stat.S_ISSOCK(mode):
1050 1050 kind = _(b'socket')
1051 1051 elif stat.S_ISDIR(mode):
1052 1052 kind = _(b'directory')
1053 1053 return _(b'unsupported file type (type is %s)') % kind
1054 1054
1055 1055 badfn = match.bad
1056 1056 dmap = self._map
1057 1057 lstat = os.lstat
1058 1058 getkind = stat.S_IFMT
1059 1059 dirkind = stat.S_IFDIR
1060 1060 regkind = stat.S_IFREG
1061 1061 lnkkind = stat.S_IFLNK
1062 1062 join = self._join
1063 1063 dirsfound = []
1064 1064 foundadd = dirsfound.append
1065 1065 dirsnotfound = []
1066 1066 notfoundadd = dirsnotfound.append
1067 1067
1068 1068 if not match.isexact() and self._checkcase:
1069 1069 normalize = self._normalize
1070 1070 else:
1071 1071 normalize = None
1072 1072
1073 1073 files = sorted(match.files())
1074 1074 subrepos.sort()
1075 1075 i, j = 0, 0
1076 1076 while i < len(files) and j < len(subrepos):
1077 1077 subpath = subrepos[j] + b"/"
1078 1078 if files[i] < subpath:
1079 1079 i += 1
1080 1080 continue
1081 1081 while i < len(files) and files[i].startswith(subpath):
1082 1082 del files[i]
1083 1083 j += 1
1084 1084
1085 1085 if not files or b'' in files:
1086 1086 files = [b'']
1087 1087 # constructing the foldmap is expensive, so don't do it for the
1088 1088 # common case where files is ['']
1089 1089 normalize = None
1090 1090 results = dict.fromkeys(subrepos)
1091 1091 results[b'.hg'] = None
1092 1092
1093 1093 for ff in files:
1094 1094 if normalize:
1095 1095 nf = normalize(ff, False, True)
1096 1096 else:
1097 1097 nf = ff
1098 1098 if nf in results:
1099 1099 continue
1100 1100
1101 1101 try:
1102 1102 st = lstat(join(nf))
1103 1103 kind = getkind(st.st_mode)
1104 1104 if kind == dirkind:
1105 1105 if nf in dmap:
1106 1106 # file replaced by dir on disk but still in dirstate
1107 1107 results[nf] = None
1108 1108 foundadd((nf, ff))
1109 1109 elif kind == regkind or kind == lnkkind:
1110 1110 results[nf] = st
1111 1111 else:
1112 1112 badfn(ff, badtype(kind))
1113 1113 if nf in dmap:
1114 1114 results[nf] = None
1115 1115 except (OSError) as inst:
1116 1116 # nf not found on disk - it is dirstate only
1117 1117 if nf in dmap: # does it exactly match a missing file?
1118 1118 results[nf] = None
1119 1119 else: # does it match a missing directory?
1120 1120 if self._map.hasdir(nf):
1121 1121 notfoundadd(nf)
1122 1122 else:
1123 1123 badfn(ff, encoding.strtolocal(inst.strerror))
1124 1124
1125 1125 # match.files() may contain explicitly-specified paths that shouldn't
1126 1126 # be taken; drop them from the list of files found. dirsfound/notfound
1127 1127 # aren't filtered here because they will be tested later.
1128 1128 if match.anypats():
1129 1129 for f in list(results):
1130 1130 if f == b'.hg' or f in subrepos:
1131 1131 # keep sentinel to disable further out-of-repo walks
1132 1132 continue
1133 1133 if not match(f):
1134 1134 del results[f]
1135 1135
1136 1136 # Case insensitive filesystems cannot rely on lstat() failing to detect
1137 1137 # a case-only rename. Prune the stat object for any file that does not
1138 1138 # match the case in the filesystem, if there are multiple files that
1139 1139 # normalize to the same path.
1140 1140 if match.isexact() and self._checkcase:
1141 1141 normed = {}
1142 1142
1143 1143 for f, st in results.items():
1144 1144 if st is None:
1145 1145 continue
1146 1146
1147 1147 nc = util.normcase(f)
1148 1148 paths = normed.get(nc)
1149 1149
1150 1150 if paths is None:
1151 1151 paths = set()
1152 1152 normed[nc] = paths
1153 1153
1154 1154 paths.add(f)
1155 1155
1156 1156 for norm, paths in normed.items():
1157 1157 if len(paths) > 1:
1158 1158 for path in paths:
1159 1159 folded = self._discoverpath(
1160 1160 path, norm, True, None, self._map.dirfoldmap
1161 1161 )
1162 1162 if path != folded:
1163 1163 results[path] = None
1164 1164
1165 1165 return results, dirsfound, dirsnotfound
1166 1166
1167 1167 def walk(self, match, subrepos, unknown, ignored, full=True):
1168 1168 """
1169 1169 Walk recursively through the directory tree, finding all files
1170 1170 matched by match.
1171 1171
1172 1172 If full is False, maybe skip some known-clean files.
1173 1173
1174 1174 Return a dict mapping filename to stat-like object (either
1175 1175 mercurial.osutil.stat instance or return value of os.stat()).
1176 1176
1177 1177 """
1178 1178 # full is a flag that extensions that hook into walk can use -- this
1179 1179 # implementation doesn't use it at all. This satisfies the contract
1180 1180 # because we only guarantee a "maybe".
1181 1181
1182 1182 if ignored:
1183 1183 ignore = util.never
1184 1184 dirignore = util.never
1185 1185 elif unknown:
1186 1186 ignore = self._ignore
1187 1187 dirignore = self._dirignore
1188 1188 else:
1189 1189 # if not unknown and not ignored, drop dir recursion and step 2
1190 1190 ignore = util.always
1191 1191 dirignore = util.always
1192 1192
1193 1193 if self._sparsematchfn is not None:
1194 1194 em = matchmod.exact(match.files())
1195 1195 sm = matchmod.unionmatcher([self._sparsematcher, em])
1196 1196 match = matchmod.intersectmatchers(match, sm)
1197 1197
1198 1198 matchfn = match.matchfn
1199 1199 matchalways = match.always()
1200 1200 matchtdir = match.traversedir
1201 1201 dmap = self._map
1202 1202 listdir = util.listdir
1203 1203 lstat = os.lstat
1204 1204 dirkind = stat.S_IFDIR
1205 1205 regkind = stat.S_IFREG
1206 1206 lnkkind = stat.S_IFLNK
1207 1207 join = self._join
1208 1208
1209 1209 exact = skipstep3 = False
1210 1210 if match.isexact(): # match.exact
1211 1211 exact = True
1212 1212 dirignore = util.always # skip step 2
1213 1213 elif match.prefix(): # match.match, no patterns
1214 1214 skipstep3 = True
1215 1215
1216 1216 if not exact and self._checkcase:
1217 1217 normalize = self._normalize
1218 1218 normalizefile = self._normalizefile
1219 1219 skipstep3 = False
1220 1220 else:
1221 1221 normalize = self._normalize
1222 1222 normalizefile = None
1223 1223
1224 1224 # step 1: find all explicit files
1225 1225 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1226 1226 if matchtdir:
1227 1227 for d in work:
1228 1228 matchtdir(d[0])
1229 1229 for d in dirsnotfound:
1230 1230 matchtdir(d)
1231 1231
1232 1232 skipstep3 = skipstep3 and not (work or dirsnotfound)
1233 1233 work = [d for d in work if not dirignore(d[0])]
1234 1234
1235 1235 # step 2: visit subdirectories
1236 1236 def traverse(work, alreadynormed):
1237 1237 wadd = work.append
1238 1238 while work:
1239 1239 tracing.counter('dirstate.walk work', len(work))
1240 1240 nd = work.pop()
1241 1241 visitentries = match.visitchildrenset(nd)
1242 1242 if not visitentries:
1243 1243 continue
1244 1244 if visitentries == b'this' or visitentries == b'all':
1245 1245 visitentries = None
1246 1246 skip = None
1247 1247 if nd != b'':
1248 1248 skip = b'.hg'
1249 1249 try:
1250 1250 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1251 1251 entries = listdir(join(nd), stat=True, skip=skip)
1252 1252 except (PermissionError, FileNotFoundError) as inst:
1253 1253 match.bad(
1254 1254 self.pathto(nd), encoding.strtolocal(inst.strerror)
1255 1255 )
1256 1256 continue
1257 1257 for f, kind, st in entries:
1258 1258 # Some matchers may return files in the visitentries set,
1259 1259 # instead of 'this', if the matcher explicitly mentions them
1260 1260 # and is not an exactmatcher. This is acceptable; we do not
1261 1261 # make any hard assumptions about file-or-directory below
1262 1262 # based on the presence of `f` in visitentries. If
1263 1263 # visitchildrenset returned a set, we can always skip the
1264 1264 # entries *not* in the set it provided regardless of whether
1265 1265 # they're actually a file or a directory.
1266 1266 if visitentries and f not in visitentries:
1267 1267 continue
1268 1268 if normalizefile:
1269 1269 # even though f might be a directory, we're only
1270 1270 # interested in comparing it to files currently in the
1271 1271 # dmap -- therefore normalizefile is enough
1272 1272 nf = normalizefile(
1273 1273 nd and (nd + b"/" + f) or f, True, True
1274 1274 )
1275 1275 else:
1276 1276 nf = nd and (nd + b"/" + f) or f
1277 1277 if nf not in results:
1278 1278 if kind == dirkind:
1279 1279 if not ignore(nf):
1280 1280 if matchtdir:
1281 1281 matchtdir(nf)
1282 1282 wadd(nf)
1283 1283 if nf in dmap and (matchalways or matchfn(nf)):
1284 1284 results[nf] = None
1285 1285 elif kind == regkind or kind == lnkkind:
1286 1286 if nf in dmap:
1287 1287 if matchalways or matchfn(nf):
1288 1288 results[nf] = st
1289 1289 elif (matchalways or matchfn(nf)) and not ignore(
1290 1290 nf
1291 1291 ):
1292 1292 # unknown file -- normalize if necessary
1293 1293 if not alreadynormed:
1294 1294 nf = normalize(nf, False, True)
1295 1295 results[nf] = st
1296 1296 elif nf in dmap and (matchalways or matchfn(nf)):
1297 1297 results[nf] = None
1298 1298
1299 1299 for nd, d in work:
1300 1300 # alreadynormed means that processwork doesn't have to do any
1301 1301 # expensive directory normalization
1302 1302 alreadynormed = not normalize or nd == d
1303 1303 traverse([d], alreadynormed)
1304 1304
1305 1305 for s in subrepos:
1306 1306 del results[s]
1307 1307 del results[b'.hg']
1308 1308
1309 1309 # step 3: visit remaining files from dmap
1310 1310 if not skipstep3 and not exact:
1311 1311 # If a dmap file is not in results yet, it was either
1312 1312 # a) not matching matchfn b) ignored, c) missing, or d) under a
1313 1313 # symlink directory.
1314 1314 if not results and matchalways:
1315 1315 visit = [f for f in dmap]
1316 1316 else:
1317 1317 visit = [f for f in dmap if f not in results and matchfn(f)]
1318 1318 visit.sort()
1319 1319
1320 1320 if unknown:
1321 1321 # unknown == True means we walked all dirs under the roots
1322 1322 # that weren't ignored, and everything that matched was stat'ed
1323 1323 # and is already in results.
1324 1324 # The rest must thus be ignored or under a symlink.
1325 1325 audit_path = pathutil.pathauditor(self._root, cached=True)
1326 1326
1327 1327 for nf in iter(visit):
1328 1328 # If a stat for the same file was already added with a
1329 1329 # different case, don't add one for this, since that would
1330 1330 # make it appear as if the file exists under both names
1331 1331 # on disk.
1332 1332 if (
1333 1333 normalizefile
1334 1334 and normalizefile(nf, True, True) in results
1335 1335 ):
1336 1336 results[nf] = None
1337 1337 # Report ignored items in the dmap as long as they are not
1338 1338 # under a symlink directory.
1339 1339 elif audit_path.check(nf):
1340 1340 try:
1341 1341 results[nf] = lstat(join(nf))
1342 1342 # file was just ignored, no links, and exists
1343 1343 except OSError:
1344 1344 # file doesn't exist
1345 1345 results[nf] = None
1346 1346 else:
1347 1347 # It's either missing or under a symlink directory
1348 1348 # which in this case we report as missing
1349 1349 results[nf] = None
1350 1350 else:
1351 1351 # We may not have walked the full directory tree above,
1352 1352 # so stat and check everything we missed.
1353 1353 iv = iter(visit)
1354 1354 for st in util.statfiles([join(i) for i in visit]):
1355 1355 results[next(iv)] = st
1356 1356 return results
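    # Usage sketch (assumptions: `matchmod.always()` as the matcher, no
    # subrepos): walk unknown-but-not-ignored files and inspect the
    # stat-like results; a None value means "known to the dirstate but not
    # stat'ed here":
    #
    #     m = matchmod.always()
    #     for fn, st in dirstate.walk(
    #         m, subrepos=[], unknown=True, ignored=False
    #     ).items():
    #         if st is None:
    #             ...  # missing, ignored, or filtered entry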
1357 1357
1358 1358 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1359 1359 if self._sparsematchfn is not None:
1360 1360 em = matchmod.exact(matcher.files())
1361 1361 sm = matchmod.unionmatcher([self._sparsematcher, em])
1362 1362 matcher = matchmod.intersectmatchers(matcher, sm)
1363 1363 # Force Rayon (Rust parallelism library) to respect the number of
1364 1364 # workers. This is a temporary workaround until Rust code knows
1365 1365 # how to read the config file.
1366 1366 numcpus = self._ui.configint(b"worker", b"numcpus")
1367 1367 if numcpus is not None:
1368 1368 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1369 1369
1370 1370 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1371 1371 if not workers_enabled:
1372 1372 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1373 1373
1374 1374 (
1375 1375 lookup,
1376 1376 modified,
1377 1377 added,
1378 1378 removed,
1379 1379 deleted,
1380 1380 clean,
1381 1381 ignored,
1382 1382 unknown,
1383 1383 warnings,
1384 1384 bad,
1385 1385 traversed,
1386 1386 dirty,
1387 1387 ) = rustmod.status(
1388 1388 self._map._map,
1389 1389 matcher,
1390 1390 self._rootdir,
1391 1391 self._ignorefiles(),
1392 1392 self._checkexec,
1393 1393 bool(list_clean),
1394 1394 bool(list_ignored),
1395 1395 bool(list_unknown),
1396 1396 bool(matcher.traversedir),
1397 1397 )
1398 1398
1399 1399 self._dirty |= dirty
1400 1400
1401 1401 if matcher.traversedir:
1402 1402 for dir in traversed:
1403 1403 matcher.traversedir(dir)
1404 1404
1405 1405 if self._ui.warn:
1406 1406 for item in warnings:
1407 1407 if isinstance(item, tuple):
1408 1408 file_path, syntax = item
1409 1409 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1410 1410 file_path,
1411 1411 syntax,
1412 1412 )
1413 1413 self._ui.warn(msg)
1414 1414 else:
1415 1415 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1416 1416 self._ui.warn(
1417 1417 msg
1418 1418 % (
1419 1419 pathutil.canonpath(
1420 1420 self._rootdir, self._rootdir, item
1421 1421 ),
1422 1422 b"No such file or directory",
1423 1423 )
1424 1424 )
1425 1425
1426 1426 for fn, message in bad:
1427 1427 matcher.bad(fn, encoding.strtolocal(message))
1428 1428
1429 1429 status = scmutil.status(
1430 1430 modified=modified,
1431 1431 added=added,
1432 1432 removed=removed,
1433 1433 deleted=deleted,
1434 1434 unknown=unknown,
1435 1435 ignored=ignored,
1436 1436 clean=clean,
1437 1437 )
1438 1438 return (lookup, status)
1439 1439
1440 1440 # XXX since this can make the dirstate dirty (through rust), we should
1441 1441 # enforce that it is done within an appropriate change-context that scopes
1442 1442 # the change and ensures it eventually gets written on disk (or rolled back)
1443 1443 def status(self, match, subrepos, ignored, clean, unknown):
1444 1444 """Determine the status of the working copy relative to the
1445 1445 dirstate and return a pair of (unsure, status), where status is of type
1446 1446 scmutil.status and:
1447 1447
1448 1448 unsure:
1449 1449 files that might have been modified since the dirstate was
1450 1450 written, but need to be read to be sure (size is the same
1451 1451 but mtime differs)
1452 1452 status.modified:
1453 1453 files that have definitely been modified since the dirstate
1454 1454 was written (different size or mode)
1455 1455 status.clean:
1456 1456 files that have definitely not been modified since the
1457 1457 dirstate was written
1458 1458 """
1459 1459 listignored, listclean, listunknown = ignored, clean, unknown
1460 1460 lookup, modified, added, unknown, ignored = [], [], [], [], []
1461 1461 removed, deleted, clean = [], [], []
1462 1462
1463 1463 dmap = self._map
1464 1464 dmap.preload()
1465 1465
1466 1466 use_rust = True
1467 1467
1468 1468 allowed_matchers = (
1469 1469 matchmod.alwaysmatcher,
1470 1470 matchmod.differencematcher,
1471 1471 matchmod.exactmatcher,
1472 1472 matchmod.includematcher,
1473 1473 matchmod.intersectionmatcher,
1474 1474 matchmod.nevermatcher,
1475 1475 matchmod.unionmatcher,
1476 1476 )
1477 1477
1478 1478 if rustmod is None:
1479 1479 use_rust = False
1480 1480 elif self._checkcase:
1481 1481 # Case-insensitive filesystems are not handled yet
1482 1482 use_rust = False
1483 1483 elif subrepos:
1484 1484 use_rust = False
1485 1485 elif not isinstance(match, allowed_matchers):
1486 1486 # Some matchers have yet to be implemented
1487 1487 use_rust = False
1488 1488
1489 1489 # Get the time from the filesystem so we can disambiguate files that
1490 1490 # appear modified in the present or future.
1491 1491 try:
1492 1492 mtime_boundary = timestamp.get_fs_now(self._opener)
1493 1493 except OSError:
1494 1494 # In largefiles or readonly context
1495 1495 mtime_boundary = None
1496 1496
1497 1497 if use_rust:
1498 1498 try:
1499 1499 res = self._rust_status(
1500 1500 match, listclean, listignored, listunknown
1501 1501 )
1502 1502 return res + (mtime_boundary,)
1503 1503 except rustmod.FallbackError:
1504 1504 pass
1505 1505
1506 1506 def noop(f):
1507 1507 pass
1508 1508
1509 1509 dcontains = dmap.__contains__
1510 1510 dget = dmap.__getitem__
1511 1511 ladd = lookup.append # aka "unsure"
1512 1512 madd = modified.append
1513 1513 aadd = added.append
1514 1514 uadd = unknown.append if listunknown else noop
1515 1515 iadd = ignored.append if listignored else noop
1516 1516 radd = removed.append
1517 1517 dadd = deleted.append
1518 1518 cadd = clean.append if listclean else noop
1519 1519 mexact = match.exact
1520 1520 dirignore = self._dirignore
1521 1521 checkexec = self._checkexec
1522 1522 checklink = self._checklink
1523 1523 copymap = self._map.copymap
1524 1524
1525 1525 # We need to do full walks when either
1526 1526 # - we're listing all clean files, or
1527 1527 # - match.traversedir does something, because match.traversedir should
1528 1528 # be called for every dir in the working dir
1529 1529 full = listclean or match.traversedir is not None
1530 1530 for fn, st in self.walk(
1531 1531 match, subrepos, listunknown, listignored, full=full
1532 1532 ).items():
1533 1533 if not dcontains(fn):
1534 1534 if (listignored or mexact(fn)) and dirignore(fn):
1535 1535 if listignored:
1536 1536 iadd(fn)
1537 1537 else:
1538 1538 uadd(fn)
1539 1539 continue
1540 1540
1541 1541 t = dget(fn)
1542 1542 mode = t.mode
1543 1543 size = t.size
1544 1544
1545 1545 if not st and t.tracked:
1546 1546 dadd(fn)
1547 1547 elif t.p2_info:
1548 1548 madd(fn)
1549 1549 elif t.added:
1550 1550 aadd(fn)
1551 1551 elif t.removed:
1552 1552 radd(fn)
1553 1553 elif t.tracked:
1554 1554 if not checklink and t.has_fallback_symlink:
1555 1555 # If the file system does not support symlink, the mode
1556 1556 # might not be correctly stored in the dirstate, so do not
1557 1557 # trust it.
1558 1558 ladd(fn)
1559 1559 elif not checkexec and t.has_fallback_exec:
1560 1560 # If the file system does not support exec bits, the mode
1561 1561 # might not be correctly stored in the dirstate, so do not
1562 1562 # trust it.
1563 1563 ladd(fn)
1564 1564 elif (
1565 1565 size >= 0
1566 1566 and (
1567 1567 (size != st.st_size and size != st.st_size & _rangemask)
1568 1568 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1569 1569 )
1570 1570 or fn in copymap
1571 1571 ):
1572 1572 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1573 1573 # issue6456: Size returned may be longer due to
1574 1574 # encryption on EXT-4 fscrypt, undecided.
1575 1575 ladd(fn)
1576 1576 else:
1577 1577 madd(fn)
1578 1578 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1579 1579 # There might be a change in the future if for example the
1580 1580 # internal clock is off, but this is a case where the issues
1581 1581 # the user would face would be a lot worse and there is
1582 1582 # nothing we can really do.
1583 1583 ladd(fn)
1584 1584 elif listclean:
1585 1585 cadd(fn)
1586 1586 status = scmutil.status(
1587 1587 modified, added, removed, deleted, unknown, ignored, clean
1588 1588 )
1589 1589 return (lookup, status, mtime_boundary)
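    # Usage sketch (most callers go through repo.status() instead of calling
    # this directly; the matcher and flags below are illustrative):
    #
    #     unsure, st, mtime_boundary = dirstate.status(
    #         matchmod.always(), subrepos=[], ignored=False,
    #         clean=False, unknown=True,
    #     )
    #     for f in unsure:
    #         ...  # content must be compared against p1 to classify `f`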
1590 1590
1591 1591 def matches(self, match):
1592 1592 """
1593 1593 return files in the dirstate (in whatever state) filtered by match
1594 1594 """
1595 1595 dmap = self._map
1596 1596 if rustmod is not None:
1597 1597 dmap = self._map._map
1598 1598
1599 1599 if match.always():
1600 1600 return dmap.keys()
1601 1601 files = match.files()
1602 1602 if match.isexact():
1603 1603 # fast path -- filter the other way around, since typically files is
1604 1604 # much smaller than dmap
1605 1605 return [f for f in files if f in dmap]
1606 1606 if match.prefix() and all(fn in dmap for fn in files):
1607 1607 # fast path -- all the values are known to be files, so just return
1608 1608 # that
1609 1609 return list(files)
1610 1610 return [f for f in dmap if match(f)]
1611 1611
1612 1612 def _actualfilename(self, tr):
1613 1613 if tr:
1614 1614 return self._pendingfilename
1615 1615 else:
1616 1616 return self._filename
1617 1617
1618 def all_file_names(self):
1619 """list all filenames currently used by this dirstate
1620
1621 This is only used to do `hg rollback` related backup in the transaction
1622 """
1623 if not self._opener.exists(self._filename):
1624 # no data ever written to disk yet
1625 return ()
1626 elif self._use_dirstate_v2:
1627 return (
1628 self._filename,
1629 self._map.docket.data_filename(),
1630 )
1631 else:
1632 return (self._filename,)
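    # Sketch of the intended consumer (per the commit message the transaction
    # can now back these files up "blindly"; the exact call site lives in
    # localrepo, not in this file, and is only approximated here):
    #
    #     for name in repo.dirstate.all_file_names():
    #         tr.addbackup(name, hardlink=True, location=b'plain')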
1633
1618 1634 def data_backup_filename(self, backupname):
1619 1635 if not self._use_dirstate_v2:
1620 1636 return None
1621 1637 return backupname + b'.v2-data'
1622 1638
1623 1639 def _new_backup_data_filename(self, backupname):
1624 1640 """return a filename to backup a data-file or None"""
1625 1641 if not self._use_dirstate_v2:
1626 1642 return None
1627 1643 if self._map.docket.uuid is None:
1628 1644 # not created yet, nothing to backup
1629 1645 return None
1630 1646 data_filename = self._map.docket.data_filename()
1631 1647 return data_filename, self.data_backup_filename(backupname)
1632 1648
1633 1649 def backup_data_file(self, backupname):
1634 1650 if not self._use_dirstate_v2:
1635 1651 return None
1636 1652 docket = docketmod.DirstateDocket.parse(
1637 1653 self._opener.read(backupname),
1638 1654 self._nodeconstants,
1639 1655 )
1640 1656 return self.data_backup_filename(backupname), docket.data_filename()
1641 1657
1642 1658 def savebackup(self, tr, backupname):
1643 1659 '''Save current dirstate into backup file'''
1644 1660 filename = self._actualfilename(tr)
1645 1661 assert backupname != filename
1646 1662
1647 1663 # use '_writedirstate' instead of 'write' to write changes certainly,
1648 1664 # because the latter omits writing out if transaction is running.
1649 1665 # output file will be used to create backup of dirstate at this point.
1650 1666 if self._dirty:
1651 1667 self._writedirstate(
1652 1668 tr,
1653 1669 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1654 1670 )
1655 1671
1656 1672 if tr:
1657 1673 # ensure that subsequent tr.writepending returns True for
1658 1674 # changes written out above, even if dirstate is never
1659 1675 # changed after this
1660 1676 tr.addfilegenerator(
1661 1677 b'dirstate-1-main',
1662 1678 (self._filename,),
1663 1679 lambda f: self._writedirstate(tr, f),
1664 1680 location=b'plain',
1665 1681 post_finalize=True,
1666 1682 )
1667 1683
1668 1684 self._opener.tryunlink(backupname)
1669 1685 if self._opener.exists(filename):
1670 1686 # hardlink backup is okay because _writedirstate is always called
1671 1687 # with an "atomictemp=True" file.
1672 1688 util.copyfile(
1673 1689 self._opener.join(filename),
1674 1690 self._opener.join(backupname),
1675 1691 hardlink=True,
1676 1692 )
1677 1693 data_pair = self._new_backup_data_filename(backupname)
1678 1694 if data_pair is not None:
1679 1695 data_filename, bck_data_filename = data_pair
1680 1696 util.copyfile(
1681 1697 self._opener.join(data_filename),
1682 1698 self._opener.join(bck_data_filename),
1683 1699 hardlink=True,
1684 1700 )
1685 1701 if tr is not None:
1686 1702 # ensure that pending file written above is unlinked at
1687 1703 # failure, even if tr.writepending isn't invoked until the
1688 1704 # end of this transaction
1689 1705 tr.registertmp(bck_data_filename, location=b'plain')
1690 1706
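A self-contained sketch (editor's addition, plain stdlib rather than Mercurial code) of why the hardlink backup above is safe: atomictemp-style writes never modify the original file in place, they rename a fresh file over it, so the hardlinked backup keeps pointing at the old content.

import os
import tempfile


def atomic_write(path, data):
    # write a new file next to the target, then rename it over the target,
    # the same idea as the "atomictemp=True" writes used above
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    with os.fdopen(fd, 'wb') as f:
        f.write(data)
    os.replace(tmp, path)


def demo(tmpdir):
    target = os.path.join(tmpdir, 'dirstate')
    backup = os.path.join(tmpdir, 'dirstate.backup')
    atomic_write(target, b'old contents')
    os.link(target, backup)  # hardlink backup, no data copied
    atomic_write(target, b'new contents')
    with open(backup, 'rb') as f:
        assert f.read() == b'old contents'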
1691 1707 def restorebackup(self, tr, backupname):
1692 1708 '''Restore dirstate by backup file'''
1693 1709 # this "invalidate()" prevents "wlock.release()" from writing
1694 1710 # changes of dirstate out after restoring from backup file
1695 1711 self.invalidate()
1696 1712 o = self._opener
1697 1713 if not o.exists(backupname):
1698 1714 # there was no file backup, delete existing files
1699 1715 filename = self._actualfilename(tr)
1700 1716 data_file = None
1701 1717 if self._use_dirstate_v2 and self._map.docket.uuid is not None:
1702 1718 data_file = self._map.docket.data_filename()
1703 1719 if o.exists(filename):
1704 1720 o.unlink(filename)
1705 1721 if data_file is not None and o.exists(data_file):
1706 1722 o.unlink(data_file)
1707 1723 return
1708 1724 filename = self._actualfilename(tr)
1709 1725 data_pair = self.backup_data_file(backupname)
1710 1726 if o.exists(filename) and util.samefile(
1711 1727 o.join(backupname), o.join(filename)
1712 1728 ):
1713 1729 o.unlink(backupname)
1714 1730 else:
1715 1731 o.rename(backupname, filename, checkambig=True)
1716 1732
1717 1733 if data_pair is not None:
1718 1734 data_backup, target = data_pair
1719 1735 if o.exists(target) and util.samefile(
1720 1736 o.join(data_backup), o.join(target)
1721 1737 ):
1722 1738 o.unlink(data_backup)
1723 1739 else:
1724 1740 o.rename(data_backup, target, checkambig=True)
1725 1741
1726 1742 def clearbackup(self, tr, backupname):
1727 1743 '''Clear backup file'''
1728 1744 o = self._opener
1729 1745 if o.exists(backupname):
1730 1746 data_backup = self.backup_data_file(backupname)
1731 1747 o.unlink(backupname)
1732 1748 if data_backup is not None:
1733 1749 o.unlink(data_backup[0])
1734 1750
1735 1751 def verify(self, m1, m2, p1, narrow_matcher=None):
1736 1752 """
1737 1753 check the dirstate contents against the parent manifest and yield errors
1738 1754 """
1739 1755 missing_from_p1 = _(
1740 1756 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1741 1757 )
1742 1758 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1743 1759 missing_from_ps = _(
1744 1760 b"%s marked as modified, but not in either manifest\n"
1745 1761 )
1746 1762 missing_from_ds = _(
1747 1763 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1748 1764 )
1749 1765 for f, entry in self.items():
1750 1766 if entry.p1_tracked:
1751 1767 if entry.modified and f not in m1 and f not in m2:
1752 1768 yield missing_from_ps % f
1753 1769 elif f not in m1:
1754 1770 yield missing_from_p1 % (f, node.short(p1))
1755 1771 if entry.added and f in m1:
1756 1772 yield unexpected_in_p1 % f
1757 1773 for f in m1:
1758 1774 if narrow_matcher is not None and not narrow_matcher(f):
1759 1775 continue
1760 1776 entry = self.get_entry(f)
1761 1777 if not entry.p1_tracked:
1762 1778 yield missing_from_ds % (f, node.short(p1))
@@ -1,3999 +1,4025 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from .pycompat import (
32 32 delattr,
33 33 getattr,
34 34 )
35 35 from . import (
36 36 bookmarks,
37 37 branchmap,
38 38 bundle2,
39 39 bundlecaches,
40 40 changegroup,
41 41 color,
42 42 commit,
43 43 context,
44 44 dirstate,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 61 pushkey,
62 62 pycompat,
63 63 rcutil,
64 64 repoview,
65 65 requirements as requirementsmod,
66 66 revlog,
67 67 revset,
68 68 revsetlang,
69 69 scmutil,
70 70 sparse,
71 71 store as storemod,
72 72 subrepoutil,
73 73 tags as tagsmod,
74 74 transaction,
75 75 txnutil,
76 76 util,
77 77 vfs as vfsmod,
78 78 wireprototypes,
79 79 )
80 80
81 81 from .interfaces import (
82 82 repository,
83 83 util as interfaceutil,
84 84 )
85 85
86 86 from .utils import (
87 87 hashutil,
88 88 procutil,
89 89 stringutil,
90 90 urlutil,
91 91 )
92 92
93 93 from .revlogutils import (
94 94 concurrency_checker as revlogchecker,
95 95 constants as revlogconst,
96 96 sidedata as sidedatamod,
97 97 )
98 98
99 99 release = lockmod.release
100 100 urlerr = util.urlerr
101 101 urlreq = util.urlreq
102 102
103 103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(b"^(dirstate|narrowspec.dirstate).*")
104 104
105 105 # set of (path, vfs-location) tuples. vfs-location is:
106 106 # - 'plain' for vfs relative paths
107 107 # - '' for svfs relative paths
108 108 _cachedfiles = set()
109 109
110 110
111 111 class _basefilecache(scmutil.filecache):
112 112 """All filecache usage on repo are done for logic that should be unfiltered"""
113 113
114 114 def __get__(self, repo, type=None):
115 115 if repo is None:
116 116 return self
117 117 # proxy to unfiltered __dict__ since filtered repo has no entry
118 118 unfi = repo.unfiltered()
119 119 try:
120 120 return unfi.__dict__[self.sname]
121 121 except KeyError:
122 122 pass
123 123 return super(_basefilecache, self).__get__(unfi, type)
124 124
125 125 def set(self, repo, value):
126 126 return super(_basefilecache, self).set(repo.unfiltered(), value)
127 127
128 128
129 129 class repofilecache(_basefilecache):
130 130 """filecache for files in .hg but outside of .hg/store"""
131 131
132 132 def __init__(self, *paths):
133 133 super(repofilecache, self).__init__(*paths)
134 134 for path in paths:
135 135 _cachedfiles.add((path, b'plain'))
136 136
137 137 def join(self, obj, fname):
138 138 return obj.vfs.join(fname)
139 139
140 140
141 141 class storecache(_basefilecache):
142 142 """filecache for files in the store"""
143 143
144 144 def __init__(self, *paths):
145 145 super(storecache, self).__init__(*paths)
146 146 for path in paths:
147 147 _cachedfiles.add((path, b''))
148 148
149 149 def join(self, obj, fname):
150 150 return obj.sjoin(fname)
151 151
152 152
153 153 class changelogcache(storecache):
154 154 """filecache for the changelog"""
155 155
156 156 def __init__(self):
157 157 super(changelogcache, self).__init__()
158 158 _cachedfiles.add((b'00changelog.i', b''))
159 159 _cachedfiles.add((b'00changelog.n', b''))
160 160
161 161 def tracked_paths(self, obj):
162 162 paths = [self.join(obj, b'00changelog.i')]
163 163 if obj.store.opener.options.get(b'persistent-nodemap', False):
164 164 paths.append(self.join(obj, b'00changelog.n'))
165 165 return paths
166 166
167 167
168 168 class manifestlogcache(storecache):
169 169 """filecache for the manifestlog"""
170 170
171 171 def __init__(self):
172 172 super(manifestlogcache, self).__init__()
173 173 _cachedfiles.add((b'00manifest.i', b''))
174 174 _cachedfiles.add((b'00manifest.n', b''))
175 175
176 176 def tracked_paths(self, obj):
177 177 paths = [self.join(obj, b'00manifest.i')]
178 178 if obj.store.opener.options.get(b'persistent-nodemap', False):
179 179 paths.append(self.join(obj, b'00manifest.n'))
180 180 return paths
181 181
182 182
183 183 class mixedrepostorecache(_basefilecache):
184 184 """filecache for a mix files in .hg/store and outside"""
185 185
186 186 def __init__(self, *pathsandlocations):
187 187 # scmutil.filecache only uses the path for passing back into our
188 188 # join(), so we can safely pass a list of paths and locations
189 189 super(mixedrepostorecache, self).__init__(*pathsandlocations)
190 190 _cachedfiles.update(pathsandlocations)
191 191
192 192 def join(self, obj, fnameandlocation):
193 193 fname, location = fnameandlocation
194 194 if location == b'plain':
195 195 return obj.vfs.join(fname)
196 196 else:
197 197 if location != b'':
198 198 raise error.ProgrammingError(
199 199 b'unexpected location: %s' % location
200 200 )
201 201 return obj.sjoin(fname)
202 202
203 203
204 204 def isfilecached(repo, name):
205 205 """check if a repo has already cached "name" filecache-ed property
206 206
207 207 This returns (cachedobj-or-None, iscached) tuple.
208 208 """
209 209 cacheentry = repo.unfiltered()._filecache.get(name, None)
210 210 if not cacheentry:
211 211 return None, False
212 212 return cacheentry.obj, True
213 213
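A hedged usage sketch (editor's addition) for isfilecached(): callers unpack the (object, iscached) pair so they only act on properties the filecache has already materialized; the property name below is only an example.

def cached_changelog_or_none(repo):
    # return the changelog only if it is already cached, never forcing a load
    cl, cached = isfilecached(repo, 'changelog')
    return cl if cached else None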
214 214
215 215 class unfilteredpropertycache(util.propertycache):
216 216 """propertycache that apply to unfiltered repo only"""
217 217
218 218 def __get__(self, repo, type=None):
219 219 unfi = repo.unfiltered()
220 220 if unfi is repo:
221 221 return super(unfilteredpropertycache, self).__get__(unfi)
222 222 return getattr(unfi, self.name)
223 223
224 224
225 225 class filteredpropertycache(util.propertycache):
226 226 """propertycache that must take filtering in account"""
227 227
228 228 def cachevalue(self, obj, value):
229 229 object.__setattr__(obj, self.name, value)
230 230
231 231
232 232 def hasunfilteredcache(repo, name):
233 233 """check if a repo has an unfilteredpropertycache value for <name>"""
234 234 return name in vars(repo.unfiltered())
235 235
236 236
237 237 def unfilteredmethod(orig):
238 238 """decorate method that always need to be run on unfiltered version"""
239 239
240 240 @functools.wraps(orig)
241 241 def wrapper(repo, *args, **kwargs):
242 242 return orig(repo.unfiltered(), *args, **kwargs)
243 243
244 244 return wrapper
245 245
246 246
247 247 moderncaps = {
248 248 b'lookup',
249 249 b'branchmap',
250 250 b'pushkey',
251 251 b'known',
252 252 b'getbundle',
253 253 b'unbundle',
254 254 }
255 255 legacycaps = moderncaps.union({b'changegroupsubset'})
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommandexecutor)
259 259 class localcommandexecutor:
260 260 def __init__(self, peer):
261 261 self._peer = peer
262 262 self._sent = False
263 263 self._closed = False
264 264
265 265 def __enter__(self):
266 266 return self
267 267
268 268 def __exit__(self, exctype, excvalue, exctb):
269 269 self.close()
270 270
271 271 def callcommand(self, command, args):
272 272 if self._sent:
273 273 raise error.ProgrammingError(
274 274 b'callcommand() cannot be used after sendcommands()'
275 275 )
276 276
277 277 if self._closed:
278 278 raise error.ProgrammingError(
279 279 b'callcommand() cannot be used after close()'
280 280 )
281 281
282 282 # We don't need to support anything fancy. Just call the named
283 283 # method on the peer and return a resolved future.
284 284 fn = getattr(self._peer, pycompat.sysstr(command))
285 285
286 286 f = futures.Future()
287 287
288 288 try:
289 289 result = fn(**pycompat.strkwargs(args))
290 290 except Exception:
291 291 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
292 292 else:
293 293 f.set_result(result)
294 294
295 295 return f
296 296
297 297 def sendcommands(self):
298 298 self._sent = True
299 299
300 300 def close(self):
301 301 self._closed = True
302 302
303 303
304 304 @interfaceutil.implementer(repository.ipeercommands)
305 305 class localpeer(repository.peer):
306 306 '''peer for a local repo; reflects only the most recent API'''
307 307
308 308 def __init__(self, repo, caps=None, path=None):
309 309 super(localpeer, self).__init__(repo.ui, path=path)
310 310
311 311 if caps is None:
312 312 caps = moderncaps.copy()
313 313 self._repo = repo.filtered(b'served')
314 314
315 315 if repo._wanted_sidedata:
316 316 formatted = bundle2.format_remote_wanted_sidedata(repo)
317 317 caps.add(b'exp-wanted-sidedata=' + formatted)
318 318
319 319 self._caps = repo._restrictcapabilities(caps)
320 320
321 321 # Begin of _basepeer interface.
322 322
323 323 def url(self):
324 324 return self._repo.url()
325 325
326 326 def local(self):
327 327 return self._repo
328 328
329 329 def canpush(self):
330 330 return True
331 331
332 332 def close(self):
333 333 self._repo.close()
334 334
335 335 # End of _basepeer interface.
336 336
337 337 # Begin of _basewirecommands interface.
338 338
339 339 def branchmap(self):
340 340 return self._repo.branchmap()
341 341
342 342 def capabilities(self):
343 343 return self._caps
344 344
345 345 def clonebundles(self):
346 346 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
347 347
348 348 def debugwireargs(self, one, two, three=None, four=None, five=None):
349 349 """Used to test argument passing over the wire"""
350 350 return b"%s %s %s %s %s" % (
351 351 one,
352 352 two,
353 353 pycompat.bytestr(three),
354 354 pycompat.bytestr(four),
355 355 pycompat.bytestr(five),
356 356 )
357 357
358 358 def getbundle(
359 359 self,
360 360 source,
361 361 heads=None,
362 362 common=None,
363 363 bundlecaps=None,
364 364 remote_sidedata=None,
365 365 **kwargs
366 366 ):
367 367 chunks = exchange.getbundlechunks(
368 368 self._repo,
369 369 source,
370 370 heads=heads,
371 371 common=common,
372 372 bundlecaps=bundlecaps,
373 373 remote_sidedata=remote_sidedata,
374 374 **kwargs
375 375 )[1]
376 376 cb = util.chunkbuffer(chunks)
377 377
378 378 if exchange.bundle2requested(bundlecaps):
379 379 # When requesting a bundle2, getbundle returns a stream to make the
380 380 # wire level function happier. We need to build a proper object
381 381 # from it in local peer.
382 382 return bundle2.getunbundler(self.ui, cb)
383 383 else:
384 384 return changegroup.getunbundler(b'01', cb, None)
385 385
386 386 def heads(self):
387 387 return self._repo.heads()
388 388
389 389 def known(self, nodes):
390 390 return self._repo.known(nodes)
391 391
392 392 def listkeys(self, namespace):
393 393 return self._repo.listkeys(namespace)
394 394
395 395 def lookup(self, key):
396 396 return self._repo.lookup(key)
397 397
398 398 def pushkey(self, namespace, key, old, new):
399 399 return self._repo.pushkey(namespace, key, old, new)
400 400
401 401 def stream_out(self):
402 402 raise error.Abort(_(b'cannot perform stream clone against local peer'))
403 403
404 404 def unbundle(self, bundle, heads, url):
405 405 """apply a bundle on a repo
406 406
407 407 This function handles the repo locking itself."""
408 408 try:
409 409 try:
410 410 bundle = exchange.readbundle(self.ui, bundle, None)
411 411 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
412 412 if util.safehasattr(ret, b'getchunks'):
413 413 # This is a bundle20 object, turn it into an unbundler.
414 414 # This little dance should be dropped eventually when the
415 415 # API is finally improved.
416 416 stream = util.chunkbuffer(ret.getchunks())
417 417 ret = bundle2.getunbundler(self.ui, stream)
418 418 return ret
419 419 except Exception as exc:
420 420 # If the exception contains output salvaged from a bundle2
421 421 # reply, we need to make sure it is printed before continuing
422 422 # to fail. So we build a bundle2 with such output and consume
423 423 # it directly.
424 424 #
425 425 # This is not very elegant but allows a "simple" solution for
426 426 # issue4594
427 427 output = getattr(exc, '_bundle2salvagedoutput', ())
428 428 if output:
429 429 bundler = bundle2.bundle20(self._repo.ui)
430 430 for out in output:
431 431 bundler.addpart(out)
432 432 stream = util.chunkbuffer(bundler.getchunks())
433 433 b = bundle2.getunbundler(self.ui, stream)
434 434 bundle2.processbundle(self._repo, b)
435 435 raise
436 436 except error.PushRaced as exc:
437 437 raise error.ResponseError(
438 438 _(b'push failed:'), stringutil.forcebytestr(exc)
439 439 )
440 440
441 441 # End of _basewirecommands interface.
442 442
443 443 # Begin of peer interface.
444 444
445 445 def commandexecutor(self):
446 446 return localcommandexecutor(self)
447 447
448 448 # End of peer interface.
449 449
450 450
451 451 @interfaceutil.implementer(repository.ipeerlegacycommands)
452 452 class locallegacypeer(localpeer):
453 453 """peer extension which implements legacy methods too; used for tests with
454 454 restricted capabilities"""
455 455
456 456 def __init__(self, repo, path=None):
457 457 super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
458 458
459 459 # Begin of baselegacywirecommands interface.
460 460
461 461 def between(self, pairs):
462 462 return self._repo.between(pairs)
463 463
464 464 def branches(self, nodes):
465 465 return self._repo.branches(nodes)
466 466
467 467 def changegroup(self, nodes, source):
468 468 outgoing = discovery.outgoing(
469 469 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
470 470 )
471 471 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
472 472
473 473 def changegroupsubset(self, bases, heads, source):
474 474 outgoing = discovery.outgoing(
475 475 self._repo, missingroots=bases, ancestorsof=heads
476 476 )
477 477 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
478 478
479 479 # End of baselegacywirecommands interface.
480 480
481 481
482 482 # Functions receiving (ui, features) that extensions can register to impact
483 483 # the ability to load repositories with custom requirements. Only
484 484 # functions defined in loaded extensions are called.
485 485 #
486 486 # The function receives a set of requirement strings that the repository
487 487 # is capable of opening. Functions will typically add elements to the
488 488 # set to reflect that the extension knows how to handle those requirements.
489 489 featuresetupfuncs = set()
490 490
491 491
492 492 def _getsharedvfs(hgvfs, requirements):
493 493 """returns the vfs object pointing to root of shared source
494 494 repo for a shared repository
495 495
496 496 hgvfs is vfs pointing at .hg/ of current repo (shared one)
497 497 requirements is a set of requirements of current repo (shared one)
498 498 """
499 499 # The ``shared`` or ``relshared`` requirements indicate the
500 500 # store lives in the path contained in the ``.hg/sharedpath`` file.
501 501 # This is an absolute path for ``shared`` and relative to
502 502 # ``.hg/`` for ``relshared``.
503 503 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
504 504 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
505 505 sharedpath = util.normpath(hgvfs.join(sharedpath))
506 506
507 507 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
508 508
509 509 if not sharedvfs.exists():
510 510 raise error.RepoError(
511 511 _(b'.hg/sharedpath points to nonexistent directory %s')
512 512 % sharedvfs.base
513 513 )
514 514 return sharedvfs
515 515
516 516
517 517 def _readrequires(vfs, allowmissing):
518 518 """reads the require file present at root of this vfs
519 519 and return a set of requirements
520 520
521 521 If allowmissing is True, we suppress FileNotFoundError if raised"""
522 522 # requires file contains a newline-delimited list of
523 523 # features/capabilities the opener (us) must have in order to use
524 524 # the repository. This file was introduced in Mercurial 0.9.2,
525 525 # which means very old repositories may not have one. We assume
526 526 # a missing file translates to no requirements.
527 527 read = vfs.tryread if allowmissing else vfs.read
528 528 return set(read(b'requires').splitlines())
529 529
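A self-contained sketch (editor's addition, plain stdlib rather than a Mercurial vfs) of the requires-file format described above: one requirement string per line, with a missing file treated as "no requirements" when allowed.

def read_requires(path, allowmissing=True):
    # newline-delimited list of requirement strings, as in .hg/requires
    try:
        with open(path, 'rb') as fh:
            return set(fh.read().splitlines())
    except FileNotFoundError:
        if allowmissing:
            return set()
        raise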
530 530
531 531 def makelocalrepository(baseui, path: bytes, intents=None):
532 532 """Create a local repository object.
533 533
534 534 Given arguments needed to construct a local repository, this function
535 535 performs various early repository loading functionality (such as
536 536 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
537 537 the repository can be opened, derives a type suitable for representing
538 538 that repository, and returns an instance of it.
539 539
540 540 The returned object conforms to the ``repository.completelocalrepository``
541 541 interface.
542 542
543 543 The repository type is derived by calling a series of factory functions
544 544 for each aspect/interface of the final repository. These are defined by
545 545 ``REPO_INTERFACES``.
546 546
547 547 Each factory function is called to produce a type implementing a specific
548 548 interface. The cumulative list of returned types will be combined into a
549 549 new type and that type will be instantiated to represent the local
550 550 repository.
551 551
552 552 The factory functions each receive various state that may be consulted
553 553 as part of deriving a type.
554 554
555 555 Extensions should wrap these factory functions to customize repository type
556 556 creation. Note that an extension's wrapped function may be called even if
557 557 that extension is not loaded for the repo being constructed. Extensions
558 558 should check if their ``__name__`` appears in the
559 559 ``extensionmodulenames`` set passed to the factory function and no-op if
560 560 not.
561 561 """
562 562 ui = baseui.copy()
563 563 # Prevent copying repo configuration.
564 564 ui.copy = baseui.copy
565 565
566 566 # Working directory VFS rooted at repository root.
567 567 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
568 568
569 569 # Main VFS for .hg/ directory.
570 570 hgpath = wdirvfs.join(b'.hg')
571 571 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
572 572 # Whether this repository is a shared one or not
573 573 shared = False
574 574 # If this repository is shared, vfs pointing to shared repo
575 575 sharedvfs = None
576 576
577 577 # The .hg/ path should exist and should be a directory. All other
578 578 # cases are errors.
579 579 if not hgvfs.isdir():
580 580 try:
581 581 hgvfs.stat()
582 582 except FileNotFoundError:
583 583 pass
584 584 except ValueError as e:
585 585 # Can be raised on Python 3.8 when path is invalid.
586 586 raise error.Abort(
587 587 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
588 588 )
589 589
590 590 raise error.RepoError(_(b'repository %s not found') % path)
591 591
592 592 requirements = _readrequires(hgvfs, True)
593 593 shared = (
594 594 requirementsmod.SHARED_REQUIREMENT in requirements
595 595 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
596 596 )
597 597 storevfs = None
598 598 if shared:
599 599 # This is a shared repo
600 600 sharedvfs = _getsharedvfs(hgvfs, requirements)
601 601 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
602 602 else:
603 603 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
604 604
605 605 # if .hg/requires contains the sharesafe requirement, it means
606 606 # there exists a `.hg/store/requires` too and we should read it
607 607 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
608 608 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
609 609 # is not present, refer checkrequirementscompat() for that
610 610 #
611 611 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
612 612 # repository was shared the old way. We check the share source .hg/requires
613 613 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
614 614 # to be reshared
615 615 hint = _(b"see `hg help config.format.use-share-safe` for more information")
616 616 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
617 617 if (
618 618 shared
619 619 and requirementsmod.SHARESAFE_REQUIREMENT
620 620 not in _readrequires(sharedvfs, True)
621 621 ):
622 622 mismatch_warn = ui.configbool(
623 623 b'share', b'safe-mismatch.source-not-safe.warn'
624 624 )
625 625 mismatch_config = ui.config(
626 626 b'share', b'safe-mismatch.source-not-safe'
627 627 )
628 628 mismatch_verbose_upgrade = ui.configbool(
629 629 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
630 630 )
631 631 if mismatch_config in (
632 632 b'downgrade-allow',
633 633 b'allow',
634 634 b'downgrade-abort',
635 635 ):
636 636 # prevent cyclic import localrepo -> upgrade -> localrepo
637 637 from . import upgrade
638 638
639 639 upgrade.downgrade_share_to_non_safe(
640 640 ui,
641 641 hgvfs,
642 642 sharedvfs,
643 643 requirements,
644 644 mismatch_config,
645 645 mismatch_warn,
646 646 mismatch_verbose_upgrade,
647 647 )
648 648 elif mismatch_config == b'abort':
649 649 raise error.Abort(
650 650 _(b"share source does not support share-safe requirement"),
651 651 hint=hint,
652 652 )
653 653 else:
654 654 raise error.Abort(
655 655 _(
656 656 b"share-safe mismatch with source.\nUnrecognized"
657 657 b" value '%s' of `share.safe-mismatch.source-not-safe`"
658 658 b" set."
659 659 )
660 660 % mismatch_config,
661 661 hint=hint,
662 662 )
663 663 else:
664 664 requirements |= _readrequires(storevfs, False)
665 665 elif shared:
666 666 sourcerequires = _readrequires(sharedvfs, False)
667 667 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
668 668 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
669 669 mismatch_warn = ui.configbool(
670 670 b'share', b'safe-mismatch.source-safe.warn'
671 671 )
672 672 mismatch_verbose_upgrade = ui.configbool(
673 673 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
674 674 )
675 675 if mismatch_config in (
676 676 b'upgrade-allow',
677 677 b'allow',
678 678 b'upgrade-abort',
679 679 ):
680 680 # prevent cyclic import localrepo -> upgrade -> localrepo
681 681 from . import upgrade
682 682
683 683 upgrade.upgrade_share_to_safe(
684 684 ui,
685 685 hgvfs,
686 686 storevfs,
687 687 requirements,
688 688 mismatch_config,
689 689 mismatch_warn,
690 690 mismatch_verbose_upgrade,
691 691 )
692 692 elif mismatch_config == b'abort':
693 693 raise error.Abort(
694 694 _(
695 695 b'version mismatch: source uses share-safe'
696 696 b' functionality while the current share does not'
697 697 ),
698 698 hint=hint,
699 699 )
700 700 else:
701 701 raise error.Abort(
702 702 _(
703 703 b"share-safe mismatch with source.\nUnrecognized"
704 704 b" value '%s' of `share.safe-mismatch.source-safe` set."
705 705 )
706 706 % mismatch_config,
707 707 hint=hint,
708 708 )
709 709
710 710 # The .hg/hgrc file may load extensions or contain config options
711 711 # that influence repository construction. Attempt to load it and
712 712 # process any new extensions that it may have pulled in.
713 713 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
714 714 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
715 715 extensions.loadall(ui)
716 716 extensions.populateui(ui)
717 717
718 718 # Set of module names of extensions loaded for this repository.
719 719 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
720 720
721 721 supportedrequirements = gathersupportedrequirements(ui)
722 722
723 723 # We first validate the requirements are known.
724 724 ensurerequirementsrecognized(requirements, supportedrequirements)
725 725
726 726 # Then we validate that the known set is reasonable to use together.
727 727 ensurerequirementscompatible(ui, requirements)
728 728
729 729 # TODO there are unhandled edge cases related to opening repositories with
730 730 # shared storage. If storage is shared, we should also test for requirements
731 731 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
732 732 # that repo, as that repo may load extensions needed to open it. This is a
733 733 # bit complicated because we don't want the other hgrc to overwrite settings
734 734 # in this hgrc.
735 735 #
736 736 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
737 737 # file when sharing repos. But if a requirement is added after the share is
738 738 # performed, thereby introducing a new requirement for the opener, we may
739 739 # not see that and could encounter a run-time error interacting with
740 740 # that shared store since it has an unknown-to-us requirement.
741 741
742 742 # At this point, we know we should be capable of opening the repository.
743 743 # Now get on with doing that.
744 744
745 745 features = set()
746 746
747 747 # The "store" part of the repository holds versioned data. How it is
748 748 # accessed is determined by various requirements. If `shared` or
749 749 # `relshared` requirements are present, this indicates current repository
750 750 # is a share and store exists in path mentioned in `.hg/sharedpath`
751 751 if shared:
752 752 storebasepath = sharedvfs.base
753 753 cachepath = sharedvfs.join(b'cache')
754 754 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
755 755 else:
756 756 storebasepath = hgvfs.base
757 757 cachepath = hgvfs.join(b'cache')
758 758 wcachepath = hgvfs.join(b'wcache')
759 759
760 760 # The store has changed over time and the exact layout is dictated by
761 761 # requirements. The store interface abstracts differences across all
762 762 # of them.
763 763 store = makestore(
764 764 requirements,
765 765 storebasepath,
766 766 lambda base: vfsmod.vfs(base, cacheaudited=True),
767 767 )
768 768 hgvfs.createmode = store.createmode
769 769
770 770 storevfs = store.vfs
771 771 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
772 772
773 773 if (
774 774 requirementsmod.REVLOGV2_REQUIREMENT in requirements
775 775 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
776 776 ):
777 777 features.add(repository.REPO_FEATURE_SIDE_DATA)
778 778 # the revlogv2 docket introduced race condition that we need to fix
779 779 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
780 780
781 781 # The cache vfs is used to manage cache files.
782 782 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
783 783 cachevfs.createmode = store.createmode
784 784 # The cache vfs is used to manage cache files related to the working copy
785 785 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
786 786 wcachevfs.createmode = store.createmode
787 787
788 788 # Now resolve the type for the repository object. We do this by repeatedly
789 789 # calling a factory function to produce types for specific aspects of the
790 790 # repo's operation. The aggregate returned types are used as base classes
791 791 # for a dynamically-derived type, which will represent our new repository.
792 792
793 793 bases = []
794 794 extrastate = {}
795 795
796 796 for iface, fn in REPO_INTERFACES:
797 797 # We pass all potentially useful state to give extensions tons of
798 798 # flexibility.
799 799 typ = fn()(
800 800 ui=ui,
801 801 intents=intents,
802 802 requirements=requirements,
803 803 features=features,
804 804 wdirvfs=wdirvfs,
805 805 hgvfs=hgvfs,
806 806 store=store,
807 807 storevfs=storevfs,
808 808 storeoptions=storevfs.options,
809 809 cachevfs=cachevfs,
810 810 wcachevfs=wcachevfs,
811 811 extensionmodulenames=extensionmodulenames,
812 812 extrastate=extrastate,
813 813 baseclasses=bases,
814 814 )
815 815
816 816 if not isinstance(typ, type):
817 817 raise error.ProgrammingError(
818 818 b'unable to construct type for %s' % iface
819 819 )
820 820
821 821 bases.append(typ)
822 822
823 823 # type() allows you to use characters in type names that wouldn't be
824 824 # recognized as Python symbols in source code. We abuse that to add
825 825 # rich information about our constructed repo.
826 826 name = pycompat.sysstr(
827 827 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
828 828 )
829 829
830 830 cls = type(name, tuple(bases), {})
831 831
832 832 return cls(
833 833 baseui=baseui,
834 834 ui=ui,
835 835 origroot=path,
836 836 wdirvfs=wdirvfs,
837 837 hgvfs=hgvfs,
838 838 requirements=requirements,
839 839 supportedrequirements=supportedrequirements,
840 840 sharedpath=storebasepath,
841 841 store=store,
842 842 cachevfs=cachevfs,
843 843 wcachevfs=wcachevfs,
844 844 features=features,
845 845 intents=intents,
846 846 )
847 847
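A minimal, self-contained sketch (editor's addition) of the type-derivation pattern used by makelocalrepository(): each factory contributes a base class and the final repository type is assembled with type(); the class and factory names below are invented for illustration.

class MainBehaviour:
    def hello(self):
        return 'main'


class FileStorageBehaviour:
    def storage(self):
        return 'revlog'


FACTORIES = [lambda: MainBehaviour, lambda: FileStorageBehaviour]
bases = [fn() for fn in FACTORIES]
# type() accepts characters that would not be valid Python identifiers
DerivedRepo = type('derivedrepo:<example>', tuple(bases), {})
repo = DerivedRepo()
assert repo.hello() == 'main' and repo.storage() == 'revlog'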
848 848
849 849 def loadhgrc(
850 850 ui,
851 851 wdirvfs: vfsmod.vfs,
852 852 hgvfs: vfsmod.vfs,
853 853 requirements,
854 854 sharedvfs: Optional[vfsmod.vfs] = None,
855 855 ):
856 856 """Load hgrc files/content into a ui instance.
857 857
858 858 This is called during repository opening to load any additional
859 859 config files or settings relevant to the current repository.
860 860
861 861 Returns a bool indicating whether any additional configs were loaded.
862 862
863 863 Extensions should monkeypatch this function to modify how per-repo
864 864 configs are loaded. For example, an extension may wish to pull in
865 865 configs from alternate files or sources.
866 866
867 867 sharedvfs is a vfs object pointing to the source repo if the current one is a
868 868 shared one
869 869 """
870 870 if not rcutil.use_repo_hgrc():
871 871 return False
872 872
873 873 ret = False
874 874 # first load config from shared source if we have to
875 875 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
876 876 try:
877 877 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
878 878 ret = True
879 879 except IOError:
880 880 pass
881 881
882 882 try:
883 883 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
884 884 ret = True
885 885 except IOError:
886 886 pass
887 887
888 888 try:
889 889 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
890 890 ret = True
891 891 except IOError:
892 892 pass
893 893
894 894 return ret
895 895
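A hedged sketch (editor's addition) of how an extension could monkeypatch loadhgrc(), as the docstring above suggests; the ``hgrc-extra`` file name is invented for illustration.

from mercurial import extensions, localrepo

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
    ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
    try:
        # pull in one additional, hypothetical per-repo config file
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)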
896 896
897 897 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
898 898 """Perform additional actions after .hg/hgrc is loaded.
899 899
900 900 This function is called during repository loading immediately after
901 901 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
902 902
903 903 The function can be used to validate configs, automatically add
904 904 options (including extensions) based on requirements, etc.
905 905 """
906 906
907 907 # Map of requirements to list of extensions to load automatically when
908 908 # requirement is present.
909 909 autoextensions = {
910 910 b'git': [b'git'],
911 911 b'largefiles': [b'largefiles'],
912 912 b'lfs': [b'lfs'],
913 913 }
914 914
915 915 for requirement, names in sorted(autoextensions.items()):
916 916 if requirement not in requirements:
917 917 continue
918 918
919 919 for name in names:
920 920 if not ui.hasconfig(b'extensions', name):
921 921 ui.setconfig(b'extensions', name, b'', source=b'autoload')
922 922
923 923
924 924 def gathersupportedrequirements(ui):
925 925 """Determine the complete set of recognized requirements."""
926 926 # Start with all requirements supported by this file.
927 927 supported = set(localrepository._basesupported)
928 928
929 929 # Execute ``featuresetupfuncs`` entries if they belong to an extension
930 930 # relevant to this ui instance.
931 931 modules = {m.__name__ for n, m in extensions.extensions(ui)}
932 932
933 933 for fn in featuresetupfuncs:
934 934 if fn.__module__ in modules:
935 935 fn(ui, supported)
936 936
937 937 # Add derived requirements from registered compression engines.
938 938 for name in util.compengines:
939 939 engine = util.compengines[name]
940 940 if engine.available() and engine.revlogheader():
941 941 supported.add(b'exp-compression-%s' % name)
942 942 if engine.name() == b'zstd':
943 943 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
944 944
945 945 return supported
946 946
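A hedged sketch (editor's addition) of the featuresetupfuncs hook consumed above: an extension registers a callback that adds the requirement strings it knows how to open; the ``exp-myfeature`` name is made up.

from mercurial import localrepo

def featuresetup(ui, supported):
    # advertise an invented requirement this hypothetical extension handles
    supported.add(b'exp-myfeature')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)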
947 947
948 948 def ensurerequirementsrecognized(requirements, supported):
949 949 """Validate that a set of local requirements is recognized.
950 950
951 951 Receives a set of requirements. Raises an ``error.RepoError`` if there
952 952 exists any requirement in that set that currently loaded code doesn't
953 953 recognize.
954 954
955 955 Returns a set of supported requirements.
956 956 """
957 957 missing = set()
958 958
959 959 for requirement in requirements:
960 960 if requirement in supported:
961 961 continue
962 962
963 963 if not requirement or not requirement[0:1].isalnum():
964 964 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
965 965
966 966 missing.add(requirement)
967 967
968 968 if missing:
969 969 raise error.RequirementError(
970 970 _(b'repository requires features unknown to this Mercurial: %s')
971 971 % b' '.join(sorted(missing)),
972 972 hint=_(
973 973 b'see https://mercurial-scm.org/wiki/MissingRequirement '
974 974 b'for more information'
975 975 ),
976 976 )
977 977
978 978
979 979 def ensurerequirementscompatible(ui, requirements):
980 980 """Validates that a set of recognized requirements is mutually compatible.
981 981
982 982 Some requirements may not be compatible with others or require
983 983 config options that aren't enabled. This function is called during
984 984 repository opening to ensure that the set of requirements needed
985 985 to open a repository is sane and compatible with config options.
986 986
987 987 Extensions can monkeypatch this function to perform additional
988 988 checking.
989 989
990 990 ``error.RepoError`` should be raised on failure.
991 991 """
992 992 if (
993 993 requirementsmod.SPARSE_REQUIREMENT in requirements
994 994 and not sparse.enabled
995 995 ):
996 996 raise error.RepoError(
997 997 _(
998 998 b'repository is using sparse feature but '
999 999 b'sparse is not enabled; enable the '
1000 1000 b'"sparse" extensions to access'
1001 1001 )
1002 1002 )
1003 1003
1004 1004
1005 1005 def makestore(requirements, path, vfstype):
1006 1006 """Construct a storage object for a repository."""
1007 1007 if requirementsmod.STORE_REQUIREMENT in requirements:
1008 1008 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1009 1009 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1010 1010 return storemod.fncachestore(path, vfstype, dotencode)
1011 1011
1012 1012 return storemod.encodedstore(path, vfstype)
1013 1013
1014 1014 return storemod.basicstore(path, vfstype)
1015 1015
1016 1016
1017 1017 def resolvestorevfsoptions(ui, requirements, features):
1018 1018 """Resolve the options to pass to the store vfs opener.
1019 1019
1020 1020 The returned dict is used to influence behavior of the storage layer.
1021 1021 """
1022 1022 options = {}
1023 1023
1024 1024 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1025 1025 options[b'treemanifest'] = True
1026 1026
1027 1027 # experimental config: format.manifestcachesize
1028 1028 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1029 1029 if manifestcachesize is not None:
1030 1030 options[b'manifestcachesize'] = manifestcachesize
1031 1031
1032 1032 # In the absence of another requirement superseding a revlog-related
1033 1033 # requirement, we have to assume the repo is using revlog version 0.
1034 1034 # This revlog format is super old and we don't bother trying to parse
1035 1035 # opener options for it because those options wouldn't do anything
1036 1036 # meaningful on such old repos.
1037 1037 if (
1038 1038 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1039 1039 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1040 1040 ):
1041 1041 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1042 1042 else: # explicitly mark repo as using revlogv0
1043 1043 options[b'revlogv0'] = True
1044 1044
1045 1045 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1046 1046 options[b'copies-storage'] = b'changeset-sidedata'
1047 1047 else:
1048 1048 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1049 1049 copiesextramode = (b'changeset-only', b'compatibility')
1050 1050 if writecopiesto in copiesextramode:
1051 1051 options[b'copies-storage'] = b'extra'
1052 1052
1053 1053 return options
1054 1054
1055 1055
1056 1056 def resolverevlogstorevfsoptions(ui, requirements, features):
1057 1057 """Resolve opener options specific to revlogs."""
1058 1058
1059 1059 options = {}
1060 1060 options[b'flagprocessors'] = {}
1061 1061
1062 1062 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1063 1063 options[b'revlogv1'] = True
1064 1064 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1065 1065 options[b'revlogv2'] = True
1066 1066 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1067 1067 options[b'changelogv2'] = True
1068 1068 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1069 1069 options[b'changelogv2.compute-rank'] = cmp_rank
1070 1070
1071 1071 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1072 1072 options[b'generaldelta'] = True
1073 1073
1074 1074 # experimental config: format.chunkcachesize
1075 1075 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1076 1076 if chunkcachesize is not None:
1077 1077 options[b'chunkcachesize'] = chunkcachesize
1078 1078
1079 1079 deltabothparents = ui.configbool(
1080 1080 b'storage', b'revlog.optimize-delta-parent-choice'
1081 1081 )
1082 1082 options[b'deltabothparents'] = deltabothparents
1083 1083 dps_cgds = ui.configint(
1084 1084 b'storage',
1085 1085 b'revlog.delta-parent-search.candidate-group-chunk-size',
1086 1086 )
1087 1087 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1088 1088 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1089 1089
1090 1090 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1091 1091 options[b'issue6528.fix-incoming'] = issue6528
1092 1092
1093 1093 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1094 1094 lazydeltabase = False
1095 1095 if lazydelta:
1096 1096 lazydeltabase = ui.configbool(
1097 1097 b'storage', b'revlog.reuse-external-delta-parent'
1098 1098 )
1099 1099 if lazydeltabase is None:
1100 1100 lazydeltabase = not scmutil.gddeltaconfig(ui)
1101 1101 options[b'lazydelta'] = lazydelta
1102 1102 options[b'lazydeltabase'] = lazydeltabase
1103 1103
1104 1104 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1105 1105 if 0 <= chainspan:
1106 1106 options[b'maxdeltachainspan'] = chainspan
1107 1107
1108 1108 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1109 1109 if mmapindexthreshold is not None:
1110 1110 options[b'mmapindexthreshold'] = mmapindexthreshold
1111 1111
1112 1112 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1113 1113 srdensitythres = float(
1114 1114 ui.config(b'experimental', b'sparse-read.density-threshold')
1115 1115 )
1116 1116 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1117 1117 options[b'with-sparse-read'] = withsparseread
1118 1118 options[b'sparse-read-density-threshold'] = srdensitythres
1119 1119 options[b'sparse-read-min-gap-size'] = srmingapsize
1120 1120
1121 1121 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1122 1122 options[b'sparse-revlog'] = sparserevlog
1123 1123 if sparserevlog:
1124 1124 options[b'generaldelta'] = True
1125 1125
1126 1126 maxchainlen = None
1127 1127 if sparserevlog:
1128 1128 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1129 1129 # experimental config: format.maxchainlen
1130 1130 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1131 1131 if maxchainlen is not None:
1132 1132 options[b'maxchainlen'] = maxchainlen
1133 1133
1134 1134 for r in requirements:
1135 1135 # we allow multiple compression engine requirements to co-exist because
1136 1136 # strictly speaking, revlog seems to support mixed compression styles.
1137 1137 #
1138 1138 # The compression used for new entries will be "the last one"
1139 1139 prefix = r.startswith
1140 1140 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1141 1141 options[b'compengine'] = r.split(b'-', 2)[2]
1142 1142
1143 1143 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1144 1144 if options[b'zlib.level'] is not None:
1145 1145 if not (0 <= options[b'zlib.level'] <= 9):
1146 1146 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1147 1147 raise error.Abort(msg % options[b'zlib.level'])
1148 1148 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1149 1149 if options[b'zstd.level'] is not None:
1150 1150 if not (0 <= options[b'zstd.level'] <= 22):
1151 1151 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1152 1152 raise error.Abort(msg % options[b'zstd.level'])
1153 1153
1154 1154 if requirementsmod.NARROW_REQUIREMENT in requirements:
1155 1155 options[b'enableellipsis'] = True
1156 1156
1157 1157 if ui.configbool(b'experimental', b'rust.index'):
1158 1158 options[b'rust.index'] = True
1159 1159 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1160 1160 slow_path = ui.config(
1161 1161 b'storage', b'revlog.persistent-nodemap.slow-path'
1162 1162 )
1163 1163 if slow_path not in (b'allow', b'warn', b'abort'):
1164 1164 default = ui.config_default(
1165 1165 b'storage', b'revlog.persistent-nodemap.slow-path'
1166 1166 )
1167 1167 msg = _(
1168 1168 b'unknown value for config '
1169 1169 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1170 1170 )
1171 1171 ui.warn(msg % slow_path)
1172 1172 if not ui.quiet:
1173 1173 ui.warn(_(b'falling back to default value: %s\n') % default)
1174 1174 slow_path = default
1175 1175
1176 1176 msg = _(
1177 1177 b"accessing `persistent-nodemap` repository without associated "
1178 1178 b"fast implementation."
1179 1179 )
1180 1180 hint = _(
1181 1181 b"check `hg help config.format.use-persistent-nodemap` "
1182 1182 b"for details"
1183 1183 )
1184 1184 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1185 1185 if slow_path == b'warn':
1186 1186 msg = b"warning: " + msg + b'\n'
1187 1187 ui.warn(msg)
1188 1188 if not ui.quiet:
1189 1189 hint = b'(' + hint + b')\n'
1190 1190 ui.warn(hint)
1191 1191 if slow_path == b'abort':
1192 1192 raise error.Abort(msg, hint=hint)
1193 1193 options[b'persistent-nodemap'] = True
1194 1194 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1195 1195 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1196 1196 if slow_path not in (b'allow', b'warn', b'abort'):
1197 1197 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1198 1198 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1199 1199 ui.warn(msg % slow_path)
1200 1200 if not ui.quiet:
1201 1201 ui.warn(_(b'falling back to default value: %s\n') % default)
1202 1202 slow_path = default
1203 1203
1204 1204 msg = _(
1205 1205 b"accessing `dirstate-v2` repository without associated "
1206 1206 b"fast implementation."
1207 1207 )
1208 1208 hint = _(
1209 1209 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1210 1210 )
1211 1211 if not dirstate.HAS_FAST_DIRSTATE_V2:
1212 1212 if slow_path == b'warn':
1213 1213 msg = b"warning: " + msg + b'\n'
1214 1214 ui.warn(msg)
1215 1215 if not ui.quiet:
1216 1216 hint = b'(' + hint + b')\n'
1217 1217 ui.warn(hint)
1218 1218 if slow_path == b'abort':
1219 1219 raise error.Abort(msg, hint=hint)
1220 1220 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1221 1221 options[b'persistent-nodemap.mmap'] = True
1222 1222 if ui.configbool(b'devel', b'persistent-nodemap'):
1223 1223 options[b'devel-force-nodemap'] = True
1224 1224
1225 1225 return options
1226 1226
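A tiny check (editor's addition) of the compression-requirement parsing in resolverevlogstorevfsoptions() above: the engine name is whatever follows the second dash.

# splitting on b'-' at most twice keeps the engine name in one piece
assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
assert b'exp-compression-none'.split(b'-', 2)[2] == b'none'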
1227 1227
1228 1228 def makemain(**kwargs):
1229 1229 """Produce a type conforming to ``ilocalrepositorymain``."""
1230 1230 return localrepository
1231 1231
1232 1232
1233 1233 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1234 1234 class revlogfilestorage:
1235 1235 """File storage when using revlogs."""
1236 1236
1237 1237 def file(self, path):
1238 1238 if path.startswith(b'/'):
1239 1239 path = path[1:]
1240 1240
1241 1241 return filelog.filelog(self.svfs, path)
1242 1242
1243 1243
1244 1244 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1245 1245 class revlognarrowfilestorage:
1246 1246 """File storage when using revlogs and narrow files."""
1247 1247
1248 1248 def file(self, path):
1249 1249 if path.startswith(b'/'):
1250 1250 path = path[1:]
1251 1251
1252 1252 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1253 1253
1254 1254
1255 1255 def makefilestorage(requirements, features, **kwargs):
1256 1256 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1257 1257 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1258 1258 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1259 1259
1260 1260 if requirementsmod.NARROW_REQUIREMENT in requirements:
1261 1261 return revlognarrowfilestorage
1262 1262 else:
1263 1263 return revlogfilestorage
1264 1264
1265 1265
1266 1266 # List of repository interfaces and factory functions for them. Each
1267 1267 # will be called in order during ``makelocalrepository()`` to iteratively
1268 1268 # derive the final type for a local repository instance. We capture the
1269 1269 # function as a lambda so we don't hold a reference and the module-level
1270 1270 # functions can be wrapped.
1271 1271 REPO_INTERFACES = [
1272 1272 (repository.ilocalrepositorymain, lambda: makemain),
1273 1273 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1274 1274 ]
1275 1275
1276 1276
1277 1277 @interfaceutil.implementer(repository.ilocalrepositorymain)
1278 1278 class localrepository:
1279 1279 """Main class for representing local repositories.
1280 1280
1281 1281 All local repositories are instances of this class.
1282 1282
1283 1283 Constructed on its own, instances of this class are not usable as
1284 1284 repository objects. To obtain a usable repository object, call
1285 1285 ``hg.repository()``, ``localrepo.instance()``, or
1286 1286 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1287 1287 ``instance()`` adds support for creating new repositories.
1288 1288 ``hg.repository()`` adds more extension integration, including calling
1289 1289 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1290 1290 used.
1291 1291 """
1292 1292
1293 1293 _basesupported = {
1294 1294 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1295 1295 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1296 1296 requirementsmod.CHANGELOGV2_REQUIREMENT,
1297 1297 requirementsmod.COPIESSDC_REQUIREMENT,
1298 1298 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1299 1299 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1300 1300 requirementsmod.DOTENCODE_REQUIREMENT,
1301 1301 requirementsmod.FNCACHE_REQUIREMENT,
1302 1302 requirementsmod.GENERALDELTA_REQUIREMENT,
1303 1303 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1304 1304 requirementsmod.NODEMAP_REQUIREMENT,
1305 1305 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1306 1306 requirementsmod.REVLOGV1_REQUIREMENT,
1307 1307 requirementsmod.REVLOGV2_REQUIREMENT,
1308 1308 requirementsmod.SHARED_REQUIREMENT,
1309 1309 requirementsmod.SHARESAFE_REQUIREMENT,
1310 1310 requirementsmod.SPARSE_REQUIREMENT,
1311 1311 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1312 1312 requirementsmod.STORE_REQUIREMENT,
1313 1313 requirementsmod.TREEMANIFEST_REQUIREMENT,
1314 1314 }
1315 1315
1316 1316 # list of prefixes for files which can be written without 'wlock'
1317 1317 # Extensions should extend this list when needed
1318 1318 _wlockfreeprefix = {
1319 1319 # We might consider requiring 'wlock' for the next
1320 1320 # two, but pretty much all the existing code assume
1321 1321 # wlock is not needed so we keep them excluded for
1322 1322 # now.
1323 1323 b'hgrc',
1324 1324 b'requires',
1325 1325 # XXX cache is a complicated business; someone
1326 1326 # should investigate this in depth at some point
1327 1327 b'cache/',
1328 1328 # XXX bisect was still a bit too messy at the time
1329 1329 # this changeset was introduced. Someone should fix
1330 1330 # the remaining bit and drop this line
1331 1331 b'bisect.state',
1332 1332 }
1333 1333
1334 1334 def __init__(
1335 1335 self,
1336 1336 baseui,
1337 1337 ui,
1338 1338 origroot: bytes,
1339 1339 wdirvfs: vfsmod.vfs,
1340 1340 hgvfs: vfsmod.vfs,
1341 1341 requirements,
1342 1342 supportedrequirements,
1343 1343 sharedpath: bytes,
1344 1344 store,
1345 1345 cachevfs: vfsmod.vfs,
1346 1346 wcachevfs: vfsmod.vfs,
1347 1347 features,
1348 1348 intents=None,
1349 1349 ):
1350 1350 """Create a new local repository instance.
1351 1351
1352 1352 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1353 1353 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1354 1354 object.
1355 1355
1356 1356 Arguments:
1357 1357
1358 1358 baseui
1359 1359 ``ui.ui`` instance that ``ui`` argument was based off of.
1360 1360
1361 1361 ui
1362 1362 ``ui.ui`` instance for use by the repository.
1363 1363
1364 1364 origroot
1365 1365 ``bytes`` path to working directory root of this repository.
1366 1366
1367 1367 wdirvfs
1368 1368 ``vfs.vfs`` rooted at the working directory.
1369 1369
1370 1370 hgvfs
1371 1371 ``vfs.vfs`` rooted at .hg/
1372 1372
1373 1373 requirements
1374 1374 ``set`` of bytestrings representing repository opening requirements.
1375 1375
1376 1376 supportedrequirements
1377 1377 ``set`` of bytestrings representing repository requirements that we
1378 1378 know how to open. May be a superset of ``requirements``.
1379 1379
1380 1380 sharedpath
1381 1381 ``bytes`` Defining path to storage base directory. Points to a
1382 1382 ``.hg/`` directory somewhere.
1383 1383
1384 1384 store
1385 1385 ``store.basicstore`` (or derived) instance providing access to
1386 1386 versioned storage.
1387 1387
1388 1388 cachevfs
1389 1389 ``vfs.vfs`` used for cache files.
1390 1390
1391 1391 wcachevfs
1392 1392 ``vfs.vfs`` used for cache files related to the working copy.
1393 1393
1394 1394 features
1395 1395 ``set`` of bytestrings defining features/capabilities of this
1396 1396 instance.
1397 1397
1398 1398 intents
1399 1399 ``set`` of system strings indicating what this repo will be used
1400 1400 for.
1401 1401 """
1402 1402 self.baseui = baseui
1403 1403 self.ui = ui
1404 1404 self.origroot = origroot
1405 1405 # vfs rooted at working directory.
1406 1406 self.wvfs = wdirvfs
1407 1407 self.root = wdirvfs.base
1408 1408 # vfs rooted at .hg/. Used to access most non-store paths.
1409 1409 self.vfs = hgvfs
1410 1410 self.path = hgvfs.base
1411 1411 self.requirements = requirements
1412 1412 self.nodeconstants = sha1nodeconstants
1413 1413 self.nullid = self.nodeconstants.nullid
1414 1414 self.supported = supportedrequirements
1415 1415 self.sharedpath = sharedpath
1416 1416 self.store = store
1417 1417 self.cachevfs = cachevfs
1418 1418 self.wcachevfs = wcachevfs
1419 1419 self.features = features
1420 1420
1421 1421 self.filtername = None
1422 1422
1423 1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1424 1424 b'devel', b'check-locks'
1425 1425 ):
1426 1426 self.vfs.audit = self._getvfsward(self.vfs.audit)
1427 1427 # A list of callbacks to shape the phase if no data were found.
1428 1428 # Callbacks are in the form: func(repo, roots) --> processed root.
1429 1429 # This list is to be filled by extensions during repo setup
1430 1430 self._phasedefaults = []
1431 1431
1432 1432 color.setup(self.ui)
1433 1433
1434 1434 self.spath = self.store.path
1435 1435 self.svfs = self.store.vfs
1436 1436 self.sjoin = self.store.join
1437 1437 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1438 1438 b'devel', b'check-locks'
1439 1439 ):
1440 1440 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1441 1441 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1442 1442 else: # standard vfs
1443 1443 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1444 1444
1445 1445 self._dirstatevalidatewarned = False
1446 1446
1447 1447 self._branchcaches = branchmap.BranchMapCache()
1448 1448 self._revbranchcache = None
1449 1449 self._filterpats = {}
1450 1450 self._datafilters = {}
1451 1451 self._transref = self._lockref = self._wlockref = None
1452 1452
1453 1453 # A cache for various files under .hg/ that tracks file changes,
1454 1454 # (used by the filecache decorator)
1455 1455 #
1456 1456 # Maps a property name to its util.filecacheentry
1457 1457 self._filecache = {}
1458 1458
1459 1459 # hold sets of revisions to be filtered
1460 1460 # should be cleared when something might have changed the filter value:
1461 1461 # - new changesets,
1462 1462 # - phase change,
1463 1463 # - new obsolescence marker,
1464 1464 # - working directory parent change,
1465 1465 # - bookmark changes
1466 1466 self.filteredrevcache = {}
1467 1467
1468 1468 # post-dirstate-status hooks
1469 1469 self._postdsstatus = []
1470 1470
1471 1471 # generic mapping between names and nodes
1472 1472 self.names = namespaces.namespaces()
1473 1473
1474 1474 # Key to signature value.
1475 1475 self._sparsesignaturecache = {}
1476 1476 # Signature to cached matcher instance.
1477 1477 self._sparsematchercache = {}
1478 1478
1479 1479 self._extrafilterid = repoview.extrafilter(ui)
1480 1480
1481 1481 self.filecopiesmode = None
1482 1482 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1483 1483 self.filecopiesmode = b'changeset-sidedata'
1484 1484
1485 1485 self._wanted_sidedata = set()
1486 1486 self._sidedata_computers = {}
1487 1487 sidedatamod.set_sidedata_spec_for_repo(self)
1488 1488
1489 1489 def _getvfsward(self, origfunc):
1490 1490 """build a ward for self.vfs"""
1491 1491 rref = weakref.ref(self)
1492 1492
1493 1493 def checkvfs(path, mode=None):
1494 1494 ret = origfunc(path, mode=mode)
1495 1495 repo = rref()
1496 1496 if (
1497 1497 repo is None
1498 1498 or not util.safehasattr(repo, b'_wlockref')
1499 1499 or not util.safehasattr(repo, b'_lockref')
1500 1500 ):
1501 1501 return
1502 1502 if mode in (None, b'r', b'rb'):
1503 1503 return
1504 1504 if path.startswith(repo.path):
1505 1505 # truncate name relative to the repository (.hg)
1506 1506 path = path[len(repo.path) + 1 :]
1507 1507 if path.startswith(b'cache/'):
1508 1508 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1509 1509 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1510 1510 # path prefixes covered by 'lock'
1511 1511 vfs_path_prefixes = (
1512 1512 b'journal.',
1513 1513 b'undo.',
1514 1514 b'strip-backup/',
1515 1515 b'cache/',
1516 1516 )
1517 1517 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1518 1518 if repo._currentlock(repo._lockref) is None:
1519 1519 repo.ui.develwarn(
1520 1520 b'write with no lock: "%s"' % path,
1521 1521 stacklevel=3,
1522 1522 config=b'check-locks',
1523 1523 )
1524 1524 elif repo._currentlock(repo._wlockref) is None:
1525 1525 # rest of vfs files are covered by 'wlock'
1526 1526 #
1527 1527 # exclude special files
1528 1528 for prefix in self._wlockfreeprefix:
1529 1529 if path.startswith(prefix):
1530 1530 return
1531 1531 repo.ui.develwarn(
1532 1532 b'write with no wlock: "%s"' % path,
1533 1533 stacklevel=3,
1534 1534 config=b'check-locks',
1535 1535 )
1536 1536 return ret
1537 1537
1538 1538 return checkvfs
1539 1539
1540 1540 def _getsvfsward(self, origfunc):
1541 1541 """build a ward for self.svfs"""
1542 1542 rref = weakref.ref(self)
1543 1543
1544 1544 def checksvfs(path, mode=None):
1545 1545 ret = origfunc(path, mode=mode)
1546 1546 repo = rref()
1547 1547 if repo is None or not util.safehasattr(repo, b'_lockref'):
1548 1548 return
1549 1549 if mode in (None, b'r', b'rb'):
1550 1550 return
1551 1551 if path.startswith(repo.sharedpath):
1552 1552 # truncate name relative to the repository (.hg)
1553 1553 path = path[len(repo.sharedpath) + 1 :]
1554 1554 if repo._currentlock(repo._lockref) is None:
1555 1555 repo.ui.develwarn(
1556 1556 b'write with no lock: "%s"' % path, stacklevel=4
1557 1557 )
1558 1558 return ret
1559 1559
1560 1560 return checksvfs
1561 1561
1562 1562 def close(self):
1563 1563 self._writecaches()
1564 1564
1565 1565 def _writecaches(self):
1566 1566 if self._revbranchcache:
1567 1567 self._revbranchcache.write()
1568 1568
1569 1569 def _restrictcapabilities(self, caps):
1570 1570 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1571 1571 caps = set(caps)
1572 1572 capsblob = bundle2.encodecaps(
1573 1573 bundle2.getrepocaps(self, role=b'client')
1574 1574 )
1575 1575 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1576 1576 if self.ui.configbool(b'experimental', b'narrow'):
1577 1577 caps.add(wireprototypes.NARROWCAP)
1578 1578 return caps
1579 1579
1580 1580 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1581 1581 # self -> auditor -> self._checknested -> self
1582 1582
1583 1583 @property
1584 1584 def auditor(self):
1585 1585 # This is only used by context.workingctx.match in order to
1586 1586 # detect files in subrepos.
1587 1587 return pathutil.pathauditor(self.root, callback=self._checknested)
1588 1588
1589 1589 @property
1590 1590 def nofsauditor(self):
1591 1591 # This is only used by context.basectx.match in order to detect
1592 1592 # files in subrepos.
1593 1593 return pathutil.pathauditor(
1594 1594 self.root, callback=self._checknested, realfs=False, cached=True
1595 1595 )
1596 1596
1597 1597 def _checknested(self, path):
1598 1598 """Determine if path is a legal nested repository."""
1599 1599 if not path.startswith(self.root):
1600 1600 return False
1601 1601 subpath = path[len(self.root) + 1 :]
1602 1602 normsubpath = util.pconvert(subpath)
1603 1603
1604 1604 # XXX: Checking against the current working copy is wrong in
1605 1605 # the sense that it can reject things like
1606 1606 #
1607 1607 # $ hg cat -r 10 sub/x.txt
1608 1608 #
1609 1609 # if sub/ is no longer a subrepository in the working copy
1610 1610 # parent revision.
1611 1611 #
1612 1612 # However, it can of course also allow things that would have
1613 1613 # been rejected before, such as the above cat command if sub/
1614 1614 # is a subrepository now, but was a normal directory before.
1615 1615 # The old path auditor would have rejected by mistake since it
1616 1616 # panics when it sees sub/.hg/.
1617 1617 #
1618 1618 # All in all, checking against the working copy seems sensible
1619 1619 # since we want to prevent access to nested repositories on
1620 1620 # the filesystem *now*.
1621 1621 ctx = self[None]
1622 1622 parts = util.splitpath(subpath)
1623 1623 while parts:
1624 1624 prefix = b'/'.join(parts)
1625 1625 if prefix in ctx.substate:
1626 1626 if prefix == normsubpath:
1627 1627 return True
1628 1628 else:
1629 1629 sub = ctx.sub(prefix)
1630 1630 return sub.checknested(subpath[len(prefix) + 1 :])
1631 1631 else:
1632 1632 parts.pop()
1633 1633 return False
1634 1634
1635 1635 def peer(self, path=None):
1636 1636 return localpeer(self, path=path) # not cached to avoid reference cycle
1637 1637
1638 1638 def unfiltered(self):
1639 1639 """Return unfiltered version of the repository
1640 1640
1641 1641 Intended to be overwritten by filtered repo."""
1642 1642 return self
1643 1643
1644 1644 def filtered(self, name, visibilityexceptions=None):
1645 1645 """Return a filtered version of a repository
1646 1646
1647 1647 The `name` parameter is the identifier of the requested view. This
1648 1648 will return a repoview object set "exactly" to the specified view.
1649 1649
1650 1650 This function does not apply recursive filtering to a repository. For
1651 1651 example calling `repo.filtered("served")` will return a repoview using
1652 1652 the "served" view, regardless of the initial view used by `repo`.
1653 1653
1654 1654 In other words, there is always only one level of `repoview` "filtering".
1655 1655 """
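# Editorial note (added, not in the original source): filter names map to
# entries in repoview.filtertable; for example, repo.filtered(b'visible')
# hides obsolete changesets while repo.filtered(b'served') additionally
# hides secret ones.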
1656 1656 if self._extrafilterid is not None and b'%' not in name:
1657 1657 name = name + b'%' + self._extrafilterid
1658 1658
1659 1659 cls = repoview.newtype(self.unfiltered().__class__)
1660 1660 return cls(self, name, visibilityexceptions)
1661 1661
1662 1662 @mixedrepostorecache(
1663 1663 (b'bookmarks', b'plain'),
1664 1664 (b'bookmarks.current', b'plain'),
1665 1665 (b'bookmarks', b''),
1666 1666 (b'00changelog.i', b''),
1667 1667 )
1668 1668 def _bookmarks(self):
1669 1669 # Since the multiple files involved in the transaction cannot be
1670 1670 # written atomically (with current repository format), there is a race
1671 1671 # condition here.
1672 1672 #
1673 1673 # 1) changelog content A is read
1674 1674 # 2) an outside transaction updates the changelog to content B
1675 1675 # 3) an outside transaction updates the bookmark file to refer to content B
1676 1676 # 4) bookmarks file content is read and filtered against changelog-A
1677 1677 #
1678 1678 # When this happens, bookmarks against nodes missing from A are dropped.
1679 1679 #
1680 1680 # Having this happen during a read is not great, but it becomes worse
1681 1681 # when it happens during a write because the bookmarks to the "unknown"
1682 1682 # nodes will be dropped for good. However, writes happen within locks.
1683 1683 # This locking makes it possible to have a race free consistent read.
1684 1684 # For this purpose, data read from disk before locking are
1685 1685 # "invalidated" right after the locks are taken. These invalidations are
1686 1686 # "light": the `filecache` mechanism keeps the data in memory and will
1687 1687 # reuse it if the underlying files did not change. Not parsing the
1688 1688 # same data multiple times helps performance.
1689 1689 #
1690 1690 # Unfortunately in the case described above, the files tracked by the
1691 1691 # bookmarks file cache might not have changed, but the in-memory
1692 1692 # content is still "wrong" because we used an older changelog content
1693 1693 # to process the on-disk data. So after locking, the changelog would be
1694 1694 # refreshed but `_bookmarks` would be preserved.
1695 1695 # Adding `00changelog.i` to the list of tracked files is not
1696 1696 # enough, because at the time we build the content for `_bookmarks` in
1697 1697 # (4), the changelog file has already diverged from the content used
1698 1698 # for loading `changelog` in (1)
1699 1699 #
1700 1700 # To prevent the issue, we force the changelog to be explicitly
1701 1701 # reloaded while computing `_bookmarks`. The data race can still happen
1702 1702 # without the lock (with a narrower window), but it would no longer go
1703 1703 # undetected during the lock time refresh.
1704 1704 #
1705 1705 # The new schedule is as follows:
1706 1706 #
1707 1707 # 1) filecache logic detect that `_bookmarks` needs to be computed
1708 1708 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1709 1709 # 3) We force `changelog` filecache to be tested
1710 1710 # 4) cachestat for `changelog` are captured (for changelog)
1711 1711 # 5) `_bookmarks` is computed and cached
1712 1712 #
1713 1713 # The step in (3) ensures we have a changelog at least as recent as the
1714 1714 # cache stat computed in (1). As a result, at locking time:
1715 1715 # * if the changelog did not change since (1) -> we can reuse the data
1716 1716 # * otherwise -> the bookmarks get refreshed.
1717 1717 self._refreshchangelog()
1718 1718 return bookmarks.bmstore(self)
1719 1719
1720 1720 def _refreshchangelog(self):
1721 1721 """make sure the in-memory changelog matches the on-disk one"""
1722 1722 if 'changelog' in vars(self) and self.currenttransaction() is None:
1723 1723 del self.changelog
1724 1724
1725 1725 @property
1726 1726 def _activebookmark(self):
1727 1727 return self._bookmarks.active
1728 1728
1729 1729 # _phasesets depend on changelog. what we need is to call
1730 1730 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1731 1731 # can't be easily expressed in filecache mechanism.
1732 1732 @storecache(b'phaseroots', b'00changelog.i')
1733 1733 def _phasecache(self):
1734 1734 return phases.phasecache(self, self._phasedefaults)
1735 1735
1736 1736 @storecache(b'obsstore')
1737 1737 def obsstore(self):
1738 1738 return obsolete.makestore(self.ui, self)
1739 1739
1740 1740 @changelogcache()
1741 1741 def changelog(repo):
1742 1742 # load dirstate before changelog to avoid race see issue6303
1743 1743 repo.dirstate.prefetch_parents()
1744 1744 return repo.store.changelog(
1745 1745 txnutil.mayhavepending(repo.root),
1746 1746 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1747 1747 )
1748 1748
1749 1749 @manifestlogcache()
1750 1750 def manifestlog(self):
1751 1751 return self.store.manifestlog(self, self._storenarrowmatch)
1752 1752
1753 1753 @repofilecache(b'dirstate')
1754 1754 def dirstate(self):
1755 1755 return self._makedirstate()
1756 1756
1757 1757 def _makedirstate(self):
1758 1758 """Extension point for wrapping the dirstate per-repo."""
1759 1759 sparsematchfn = None
1760 1760 if sparse.use_sparse(self):
1761 1761 sparsematchfn = lambda: sparse.matcher(self)
1762 1762 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1763 1763 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1764 1764 use_dirstate_v2 = v2_req in self.requirements
1765 1765 use_tracked_hint = th in self.requirements
1766 1766
1767 1767 return dirstate.dirstate(
1768 1768 self.vfs,
1769 1769 self.ui,
1770 1770 self.root,
1771 1771 self._dirstatevalidate,
1772 1772 sparsematchfn,
1773 1773 self.nodeconstants,
1774 1774 use_dirstate_v2,
1775 1775 use_tracked_hint=use_tracked_hint,
1776 1776 )
1777 1777
1778 1778 def _dirstatevalidate(self, node):
1779 1779 try:
1780 1780 self.changelog.rev(node)
1781 1781 return node
1782 1782 except error.LookupError:
1783 1783 if not self._dirstatevalidatewarned:
1784 1784 self._dirstatevalidatewarned = True
1785 1785 self.ui.warn(
1786 1786 _(b"warning: ignoring unknown working parent %s!\n")
1787 1787 % short(node)
1788 1788 )
1789 1789 return self.nullid
1790 1790
1791 1791 @storecache(narrowspec.FILENAME)
1792 1792 def narrowpats(self):
1793 1793 """matcher patterns for this repository's narrowspec
1794 1794
1795 1795 A tuple of (includes, excludes).
1796 1796 """
1797 1797 return narrowspec.load(self)
1798 1798
1799 1799 @storecache(narrowspec.FILENAME)
1800 1800 def _storenarrowmatch(self):
1801 1801 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1802 1802 return matchmod.always()
1803 1803 include, exclude = self.narrowpats
1804 1804 return narrowspec.match(self.root, include=include, exclude=exclude)
1805 1805
1806 1806 @storecache(narrowspec.FILENAME)
1807 1807 def _narrowmatch(self):
1808 1808 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1809 1809 return matchmod.always()
1810 1810 narrowspec.checkworkingcopynarrowspec(self)
1811 1811 include, exclude = self.narrowpats
1812 1812 return narrowspec.match(self.root, include=include, exclude=exclude)
1813 1813
1814 1814 def narrowmatch(self, match=None, includeexact=False):
1815 1815 """matcher corresponding to the repo's narrowspec
1816 1816
1817 1817 If `match` is given, then that will be intersected with the narrow
1818 1818 matcher.
1819 1819
1820 1820 If `includeexact` is True, then any exact matches from `match` will
1821 1821 be included even if they're outside the narrowspec.
1822 1822 """
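# Added illustrative example (assumed narrowspec, not in the original
# source): with include=['dir1'] only, intersecting a match over
# ['dir1/a', 'dir2/b'] with the narrow matcher keeps only 'dir1/a';
# with includeexact=True the explicitly listed 'dir2/b' is still matched
# so it can be warned about later.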
1823 1823 if match:
1824 1824 if includeexact and not self._narrowmatch.always():
1825 1825 # do not exclude explicitly-specified paths so that they can
1826 1826 # be warned later on
1827 1827 em = matchmod.exact(match.files())
1828 1828 nm = matchmod.unionmatcher([self._narrowmatch, em])
1829 1829 return matchmod.intersectmatchers(match, nm)
1830 1830 return matchmod.intersectmatchers(match, self._narrowmatch)
1831 1831 return self._narrowmatch
1832 1832
1833 1833 def setnarrowpats(self, newincludes, newexcludes):
1834 1834 narrowspec.save(self, newincludes, newexcludes)
1835 1835 self.invalidate(clearfilecache=True)
1836 1836
1837 1837 @unfilteredpropertycache
1838 1838 def _quick_access_changeid_null(self):
1839 1839 return {
1840 1840 b'null': (nullrev, self.nodeconstants.nullid),
1841 1841 nullrev: (nullrev, self.nodeconstants.nullid),
1842 1842 self.nullid: (nullrev, self.nullid),
1843 1843 }
1844 1844
1845 1845 @unfilteredpropertycache
1846 1846 def _quick_access_changeid_wc(self):
1847 1847 # also fast path access to the working copy parents
1848 1848 # however, only do it for filters that ensure the wc is visible.
1849 1849 quick = self._quick_access_changeid_null.copy()
1850 1850 cl = self.unfiltered().changelog
1851 1851 for node in self.dirstate.parents():
1852 1852 if node == self.nullid:
1853 1853 continue
1854 1854 rev = cl.index.get_rev(node)
1855 1855 if rev is None:
1856 1856 # unknown working copy parent case:
1857 1857 #
1858 1858 # skip the fast path and let higher code deal with it
1859 1859 continue
1860 1860 pair = (rev, node)
1861 1861 quick[rev] = pair
1862 1862 quick[node] = pair
1863 1863 # also add the parents of the parents
1864 1864 for r in cl.parentrevs(rev):
1865 1865 if r == nullrev:
1866 1866 continue
1867 1867 n = cl.node(r)
1868 1868 pair = (r, n)
1869 1869 quick[r] = pair
1870 1870 quick[n] = pair
1871 1871 p1node = self.dirstate.p1()
1872 1872 if p1node != self.nullid:
1873 1873 quick[b'.'] = quick[p1node]
1874 1874 return quick
1875 1875
1876 1876 @unfilteredmethod
1877 1877 def _quick_access_changeid_invalidate(self):
1878 1878 if '_quick_access_changeid_wc' in vars(self):
1879 1879 del self.__dict__['_quick_access_changeid_wc']
1880 1880
1881 1881 @property
1882 1882 def _quick_access_changeid(self):
1883 1883 """a helper dictionary for __getitem__ calls
1884 1884
1885 1885 This contains a list of symbols we can recognise right away without
1886 1886 further processing.
1887 1887 """
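# Added sketch of the mapping shape (hypothetical values, not from the
# original source):
#   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
#    b'.': (p1rev, p1node), p1rev: (p1rev, p1node), p1node: (p1rev, p1node)}
# so __getitem__ can resolve these common keys without a changelog lookup.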
1888 1888 if self.filtername in repoview.filter_has_wc:
1889 1889 return self._quick_access_changeid_wc
1890 1890 return self._quick_access_changeid_null
1891 1891
1892 1892 def __getitem__(self, changeid):
1893 1893 # dealing with special cases
1894 1894 if changeid is None:
1895 1895 return context.workingctx(self)
1896 1896 if isinstance(changeid, context.basectx):
1897 1897 return changeid
1898 1898
1899 1899 # dealing with multiple revisions
1900 1900 if isinstance(changeid, slice):
1901 1901 # wdirrev isn't contiguous so the slice shouldn't include it
1902 1902 return [
1903 1903 self[i]
1904 1904 for i in range(*changeid.indices(len(self)))
1905 1905 if i not in self.changelog.filteredrevs
1906 1906 ]
1907 1907
1908 1908 # dealing with some special values
1909 1909 quick_access = self._quick_access_changeid.get(changeid)
1910 1910 if quick_access is not None:
1911 1911 rev, node = quick_access
1912 1912 return context.changectx(self, rev, node, maybe_filtered=False)
1913 1913 if changeid == b'tip':
1914 1914 node = self.changelog.tip()
1915 1915 rev = self.changelog.rev(node)
1916 1916 return context.changectx(self, rev, node)
1917 1917
1918 1918 # dealing with arbitrary values
1919 1919 try:
1920 1920 if isinstance(changeid, int):
1921 1921 node = self.changelog.node(changeid)
1922 1922 rev = changeid
1923 1923 elif changeid == b'.':
1924 1924 # this is a hack to delay/avoid loading obsmarkers
1925 1925 # when we know that '.' won't be hidden
1926 1926 node = self.dirstate.p1()
1927 1927 rev = self.unfiltered().changelog.rev(node)
1928 1928 elif len(changeid) == self.nodeconstants.nodelen:
1929 1929 try:
1930 1930 node = changeid
1931 1931 rev = self.changelog.rev(changeid)
1932 1932 except error.FilteredLookupError:
1933 1933 changeid = hex(changeid) # for the error message
1934 1934 raise
1935 1935 except LookupError:
1936 1936 # check if it might have come from damaged dirstate
1937 1937 #
1938 1938 # XXX we could avoid the unfiltered if we had a recognizable
1939 1939 # exception for filtered changeset access
1940 1940 if (
1941 1941 self.local()
1942 1942 and changeid in self.unfiltered().dirstate.parents()
1943 1943 ):
1944 1944 msg = _(b"working directory has unknown parent '%s'!")
1945 1945 raise error.Abort(msg % short(changeid))
1946 1946 changeid = hex(changeid) # for the error message
1947 1947 raise
1948 1948
1949 1949 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1950 1950 node = bin(changeid)
1951 1951 rev = self.changelog.rev(node)
1952 1952 else:
1953 1953 raise error.ProgrammingError(
1954 1954 b"unsupported changeid '%s' of type %s"
1955 1955 % (changeid, pycompat.bytestr(type(changeid)))
1956 1956 )
1957 1957
1958 1958 return context.changectx(self, rev, node)
1959 1959
1960 1960 except (error.FilteredIndexError, error.FilteredLookupError):
1961 1961 raise error.FilteredRepoLookupError(
1962 1962 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1963 1963 )
1964 1964 except (IndexError, LookupError):
1965 1965 raise error.RepoLookupError(
1966 1966 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1967 1967 )
1968 1968 except error.WdirUnsupported:
1969 1969 return context.workingctx(self)
1970 1970
1971 1971 def __contains__(self, changeid):
1972 1972 """True if the given changeid exists"""
1973 1973 try:
1974 1974 self[changeid]
1975 1975 return True
1976 1976 except error.RepoLookupError:
1977 1977 return False
1978 1978
1979 1979 def __nonzero__(self):
1980 1980 return True
1981 1981
1982 1982 __bool__ = __nonzero__
1983 1983
1984 1984 def __len__(self):
1985 1985 # no need to pay the cost of repoview.changelog
1986 1986 unfi = self.unfiltered()
1987 1987 return len(unfi.changelog)
1988 1988
1989 1989 def __iter__(self):
1990 1990 return iter(self.changelog)
1991 1991
1992 1992 def revs(self, expr: bytes, *args):
1993 1993 """Find revisions matching a revset.
1994 1994
1995 1995 The revset is specified as a string ``expr`` that may contain
1996 1996 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1997 1997
1998 1998 Revset aliases from the configuration are not expanded. To expand
1999 1999 user aliases, consider calling ``scmutil.revrange()`` or
2000 2000 ``repo.anyrevs([expr], user=True)``.
2001 2001
2002 2002 Returns a smartset.abstractsmartset, which is a list-like interface
2003 2003 that contains integer revisions.
2004 2004 """
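# Added usage sketch (illustrative values, not from the original source);
# the %-placeholders are expanded by revsetlang.formatspec:
#   repo.revs(b'heads(%ld::)', [10, 11])   # %ld: list of revision numbers
#   repo.revs(b'%ln and public()', nodes)  # %ln: list of binary nodes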
2005 2005 tree = revsetlang.spectree(expr, *args)
2006 2006 return revset.makematcher(tree)(self)
2007 2007
2008 2008 def set(self, expr: bytes, *args):
2009 2009 """Find revisions matching a revset and emit changectx instances.
2010 2010
2011 2011 This is a convenience wrapper around ``revs()`` that iterates the
2012 2012 result and is a generator of changectx instances.
2013 2013
2014 2014 Revset aliases from the configuration are not expanded. To expand
2015 2015 user aliases, consider calling ``scmutil.revrange()``.
2016 2016 """
2017 2017 for r in self.revs(expr, *args):
2018 2018 yield self[r]
2019 2019
2020 2020 def anyrevs(self, specs: bytes, user=False, localalias=None):
2021 2021 """Find revisions matching one of the given revsets.
2022 2022
2023 2023 Revset aliases from the configuration are not expanded by default. To
2024 2024 expand user aliases, specify ``user=True``. To provide some local
2025 2025 definitions overriding user aliases, set ``localalias`` to
2026 2026 ``{name: definitionstring}``.
2027 2027 """
2028 2028 if specs == [b'null']:
2029 2029 return revset.baseset([nullrev])
2030 2030 if specs == [b'.']:
2031 2031 quick_data = self._quick_access_changeid.get(b'.')
2032 2032 if quick_data is not None:
2033 2033 return revset.baseset([quick_data[0]])
2034 2034 if user:
2035 2035 m = revset.matchany(
2036 2036 self.ui,
2037 2037 specs,
2038 2038 lookup=revset.lookupfn(self),
2039 2039 localalias=localalias,
2040 2040 )
2041 2041 else:
2042 2042 m = revset.matchany(None, specs, localalias=localalias)
2043 2043 return m(self)
2044 2044
2045 2045 def url(self) -> bytes:
2046 2046 return b'file:' + self.root
2047 2047
2048 2048 def hook(self, name, throw=False, **args):
2049 2049 """Call a hook, passing this repo instance.
2050 2050
2051 2051 This is a convenience method to aid invoking hooks. Extensions likely
2052 2052 won't call this unless they have registered a custom hook or are
2053 2053 replacing code that is expected to call a hook.
2054 2054 """
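# Added usage sketch (hypothetical arguments, not from the original
# source): hook arguments are passed as keywords, e.g.
#   repo.hook(b'pretxncommit', throw=True, node=hex(n), parent1=hex(p1))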
2055 2055 return hook.hook(self.ui, self, name, throw, **args)
2056 2056
2057 2057 @filteredpropertycache
2058 2058 def _tagscache(self):
2059 2059 """Returns a tagscache object that contains various tags related
2060 2060 caches."""
2061 2061
2062 2062 # This simplifies its cache management by having one decorated
2063 2063 # function (this one) and the rest simply fetch things from it.
2064 2064 class tagscache:
2065 2065 def __init__(self):
2066 2066 # These two define the set of tags for this repository. tags
2067 2067 # maps tag name to node; tagtypes maps tag name to 'global' or
2068 2068 # 'local'. (Global tags are defined by .hgtags across all
2069 2069 # heads, and local tags are defined in .hg/localtags.)
2070 2070 # They constitute the in-memory cache of tags.
2071 2071 self.tags = self.tagtypes = None
2072 2072
2073 2073 self.nodetagscache = self.tagslist = None
2074 2074
2075 2075 cache = tagscache()
2076 2076 cache.tags, cache.tagtypes = self._findtags()
2077 2077
2078 2078 return cache
2079 2079
2080 2080 def tags(self):
2081 2081 '''return a mapping of tag to node'''
2082 2082 t = {}
2083 2083 if self.changelog.filteredrevs:
2084 2084 tags, tt = self._findtags()
2085 2085 else:
2086 2086 tags = self._tagscache.tags
2087 2087 rev = self.changelog.rev
2088 2088 for k, v in tags.items():
2089 2089 try:
2090 2090 # ignore tags to unknown nodes
2091 2091 rev(v)
2092 2092 t[k] = v
2093 2093 except (error.LookupError, ValueError):
2094 2094 pass
2095 2095 return t
2096 2096
2097 2097 def _findtags(self):
2098 2098 """Do the hard work of finding tags. Return a pair of dicts
2099 2099 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2100 2100 maps tag name to a string like \'global\' or \'local\'.
2101 2101 Subclasses or extensions are free to add their own tags, but
2102 2102 should be aware that the returned dicts will be retained for the
2103 2103 duration of the localrepo object."""
2104 2104
2105 2105 # XXX what tagtype should subclasses/extensions use? Currently
2106 2106 # mq and bookmarks add tags, but do not set the tagtype at all.
2107 2107 # Should each extension invent its own tag type? Should there
2108 2108 # be one tagtype for all such "virtual" tags? Or is the status
2109 2109 # quo fine?
2110 2110
2111 2111 # map tag name to (node, hist)
2112 2112 alltags = tagsmod.findglobaltags(self.ui, self)
2113 2113 # map tag name to tag type
2114 2114 tagtypes = {tag: b'global' for tag in alltags}
2115 2115
2116 2116 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2117 2117
2118 2118 # Build the return dicts. Have to re-encode tag names because
2119 2119 # the tags module always uses UTF-8 (in order not to lose info
2120 2120 # writing to the cache), but the rest of Mercurial wants them in
2121 2121 # local encoding.
2122 2122 tags = {}
2123 2123 for name, (node, hist) in alltags.items():
2124 2124 if node != self.nullid:
2125 2125 tags[encoding.tolocal(name)] = node
2126 2126 tags[b'tip'] = self.changelog.tip()
2127 2127 tagtypes = {
2128 2128 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2129 2129 }
2130 2130 return (tags, tagtypes)
2131 2131
2132 2132 def tagtype(self, tagname):
2133 2133 """
2134 2134 return the type of the given tag. result can be:
2135 2135
2136 2136 'local' : a local tag
2137 2137 'global' : a global tag
2138 2138 None : tag does not exist
2139 2139 """
2140 2140
2141 2141 return self._tagscache.tagtypes.get(tagname)
2142 2142
2143 2143 def tagslist(self):
2144 2144 '''return a list of tags ordered by revision'''
2145 2145 if not self._tagscache.tagslist:
2146 2146 l = []
2147 2147 for t, n in self.tags().items():
2148 2148 l.append((self.changelog.rev(n), t, n))
2149 2149 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2150 2150
2151 2151 return self._tagscache.tagslist
2152 2152
2153 2153 def nodetags(self, node):
2154 2154 '''return the tags associated with a node'''
2155 2155 if not self._tagscache.nodetagscache:
2156 2156 nodetagscache = {}
2157 2157 for t, n in self._tagscache.tags.items():
2158 2158 nodetagscache.setdefault(n, []).append(t)
2159 2159 for tags in nodetagscache.values():
2160 2160 tags.sort()
2161 2161 self._tagscache.nodetagscache = nodetagscache
2162 2162 return self._tagscache.nodetagscache.get(node, [])
2163 2163
2164 2164 def nodebookmarks(self, node):
2165 2165 """return the list of bookmarks pointing to the specified node"""
2166 2166 return self._bookmarks.names(node)
2167 2167
2168 2168 def branchmap(self):
2169 2169 """returns a dictionary {branch: [branchheads]} with branchheads
2170 2170 ordered by increasing revision number"""
2171 2171 return self._branchcaches[self]
2172 2172
2173 2173 @unfilteredmethod
2174 2174 def revbranchcache(self):
2175 2175 if not self._revbranchcache:
2176 2176 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2177 2177 return self._revbranchcache
2178 2178
2179 2179 def register_changeset(self, rev, changelogrevision):
2180 2180 self.revbranchcache().setdata(rev, changelogrevision)
2181 2181
2182 2182 def branchtip(self, branch, ignoremissing=False):
2183 2183 """return the tip node for a given branch
2184 2184
2185 2185 If ignoremissing is True, then this method will not raise an error.
2186 2186 This is helpful for callers that only expect None for a missing branch
2187 2187 (e.g. namespace).
2188 2188
2189 2189 """
2190 2190 try:
2191 2191 return self.branchmap().branchtip(branch)
2192 2192 except KeyError:
2193 2193 if not ignoremissing:
2194 2194 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2195 2195 else:
2196 2196 pass
2197 2197
2198 2198 def lookup(self, key):
2199 2199 node = scmutil.revsymbol(self, key).node()
2200 2200 if node is None:
2201 2201 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2202 2202 return node
2203 2203
2204 2204 def lookupbranch(self, key):
2205 2205 if self.branchmap().hasbranch(key):
2206 2206 return key
2207 2207
2208 2208 return scmutil.revsymbol(self, key).branch()
2209 2209
2210 2210 def known(self, nodes):
2211 2211 cl = self.changelog
2212 2212 get_rev = cl.index.get_rev
2213 2213 filtered = cl.filteredrevs
2214 2214 result = []
2215 2215 for n in nodes:
2216 2216 r = get_rev(n)
2217 2217 resp = not (r is None or r in filtered)
2218 2218 result.append(resp)
2219 2219 return result
2220 2220
2221 2221 def local(self):
2222 2222 return self
2223 2223
2224 2224 def publishing(self):
2225 2225 # it's safe (and desirable) to trust the publish flag unconditionally
2226 2226 # so that we don't finalize changes shared between users via ssh or nfs
2227 2227 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2228 2228
2229 2229 def cancopy(self):
2230 2230 # so statichttprepo's override of local() works
2231 2231 if not self.local():
2232 2232 return False
2233 2233 if not self.publishing():
2234 2234 return True
2235 2235 # if publishing we can't copy if there is filtered content
2236 2236 return not self.filtered(b'visible').changelog.filteredrevs
2237 2237
2238 2238 def shared(self):
2239 2239 '''the type of shared repository (None if not shared)'''
2240 2240 if self.sharedpath != self.path:
2241 2241 return b'store'
2242 2242 return None
2243 2243
2244 2244 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2245 2245 return self.vfs.reljoin(self.root, f, *insidef)
2246 2246
2247 2247 def setparents(self, p1, p2=None):
2248 2248 if p2 is None:
2249 2249 p2 = self.nullid
2250 2250 self[None].setparents(p1, p2)
2251 2251 self._quick_access_changeid_invalidate()
2252 2252
2253 2253 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2254 2254 """changeid must be a changeset revision, if specified.
2255 2255 fileid can be a file revision or node."""
2256 2256 return context.filectx(
2257 2257 self, path, changeid, fileid, changectx=changectx
2258 2258 )
2259 2259
2260 2260 def getcwd(self) -> bytes:
2261 2261 return self.dirstate.getcwd()
2262 2262
2263 2263 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2264 2264 return self.dirstate.pathto(f, cwd)
2265 2265
2266 2266 def _loadfilter(self, filter):
2267 2267 if filter not in self._filterpats:
2268 2268 l = []
2269 2269 for pat, cmd in self.ui.configitems(filter):
2270 2270 if cmd == b'!':
2271 2271 continue
2272 2272 mf = matchmod.match(self.root, b'', [pat])
2273 2273 fn = None
2274 2274 params = cmd
2275 2275 for name, filterfn in self._datafilters.items():
2276 2276 if cmd.startswith(name):
2277 2277 fn = filterfn
2278 2278 params = cmd[len(name) :].lstrip()
2279 2279 break
2280 2280 if not fn:
2281 2281 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2282 2282 fn.__name__ = 'commandfilter'
2283 2283 # Wrap old filters not supporting keyword arguments
2284 2284 if not pycompat.getargspec(fn)[2]:
2285 2285 oldfn = fn
2286 2286 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2287 2287 fn.__name__ = 'compat-' + oldfn.__name__
2288 2288 l.append((mf, fn, params))
2289 2289 self._filterpats[filter] = l
2290 2290 return self._filterpats[filter]
2291 2291
2292 2292 def _filter(self, filterpats, filename, data):
2293 2293 for mf, fn, cmd in filterpats:
2294 2294 if mf(filename):
2295 2295 self.ui.debug(
2296 2296 b"filtering %s through %s\n"
2297 2297 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2298 2298 )
2299 2299 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2300 2300 break
2301 2301
2302 2302 return data
2303 2303
2304 2304 @unfilteredpropertycache
2305 2305 def _encodefilterpats(self):
2306 2306 return self._loadfilter(b'encode')
2307 2307
2308 2308 @unfilteredpropertycache
2309 2309 def _decodefilterpats(self):
2310 2310 return self._loadfilter(b'decode')
2311 2311
2312 2312 def adddatafilter(self, name, filter):
2313 2313 self._datafilters[name] = filter
2314 2314
2315 2315 def wread(self, filename: bytes) -> bytes:
2316 2316 if self.wvfs.islink(filename):
2317 2317 data = self.wvfs.readlink(filename)
2318 2318 else:
2319 2319 data = self.wvfs.read(filename)
2320 2320 return self._filter(self._encodefilterpats, filename, data)
2321 2321
2322 2322 def wwrite(
2323 2323 self,
2324 2324 filename: bytes,
2325 2325 data: bytes,
2326 2326 flags: bytes,
2327 2327 backgroundclose=False,
2328 2328 **kwargs
2329 2329 ) -> int:
2330 2330 """write ``data`` into ``filename`` in the working directory
2331 2331
2332 2332 This returns length of written (maybe decoded) data.
2333 2333 """
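# Added illustrative note: ``flags`` uses b'l' for symlinks and b'x' for
# executable files, so e.g. repo.wwrite(b'run.sh', data, b'x') writes the
# file and sets its executable bit (illustrative call, not from the
# original source).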
2334 2334 data = self._filter(self._decodefilterpats, filename, data)
2335 2335 if b'l' in flags:
2336 2336 self.wvfs.symlink(data, filename)
2337 2337 else:
2338 2338 self.wvfs.write(
2339 2339 filename, data, backgroundclose=backgroundclose, **kwargs
2340 2340 )
2341 2341 if b'x' in flags:
2342 2342 self.wvfs.setflags(filename, False, True)
2343 2343 else:
2344 2344 self.wvfs.setflags(filename, False, False)
2345 2345 return len(data)
2346 2346
2347 2347 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2348 2348 return self._filter(self._decodefilterpats, filename, data)
2349 2349
2350 2350 def currenttransaction(self):
2351 2351 """return the current transaction or None if none exists"""
2352 2352 if self._transref:
2353 2353 tr = self._transref()
2354 2354 else:
2355 2355 tr = None
2356 2356
2357 2357 if tr and tr.running():
2358 2358 return tr
2359 2359 return None
2360 2360
2361 2361 def transaction(self, desc, report=None):
2362 2362 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2363 2363 b'devel', b'check-locks'
2364 2364 ):
2365 2365 if self._currentlock(self._lockref) is None:
2366 2366 raise error.ProgrammingError(b'transaction requires locking')
2367 2367 tr = self.currenttransaction()
2368 2368 if tr is not None:
2369 2369 return tr.nest(name=desc)
2370 2370
2371 2371 # abort here if the journal already exists
2372 2372 if self.svfs.exists(b"journal"):
2373 2373 raise error.RepoError(
2374 2374 _(b"abandoned transaction found"),
2375 2375 hint=_(b"run 'hg recover' to clean up transaction"),
2376 2376 )
2377 2377
2378 2378 # At that point your dirstate should be clean:
2379 2379 #
2380 2380 # - If you don't have the wlock, why would you still have a dirty
2381 2381 # dirstate?
2382 2382 #
2383 2383 # - If you hold the wlock, you should not be opening a transaction in
2384 2384 # the middle of a `dirstate.changing_*` block. The transaction needs to
2385 2385 # be open before that and wrap the change-context.
2386 2386 #
2387 2387 # - If you are not within a `dirstate.changing_*` context, why is our
2388 2388 # dirstate dirty?
2389 2389 if self.dirstate._dirty:
2390 2390 m = "cannot open a transaction with a dirty dirstate"
2391 2391 raise error.ProgrammingError(m)
2392 2392
2393 2393 idbase = b"%.40f#%f" % (random.random(), time.time())
2394 2394 ha = hex(hashutil.sha1(idbase).digest())
2395 2395 txnid = b'TXN:' + ha
2396 2396 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2397 2397
2398 2398 self._writejournal(desc)
2399 2399 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2400 2400 if report:
2401 2401 rp = report
2402 2402 else:
2403 2403 rp = self.ui.warn
2404 2404 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2405 2405 # we must avoid cyclic reference between repo and transaction.
2406 2406 reporef = weakref.ref(self)
2407 2407 # Code to track tag movement
2408 2408 #
2409 2409 # Since tags are all handled as file content, it is actually quite hard
2410 2410 # to track these movements from a code perspective. So we fall back to
2411 2411 # tracking at the repository level. One could envision tracking changes
2412 2412 # to the '.hgtags' file through changegroup apply but that fails to
2413 2413 # cope with cases where a transaction exposes new heads without a
2414 2414 # changegroup being involved (eg: phase movement).
2415 2415 #
2416 2416 # For now, we gate the feature behind a flag since this likely comes
2417 2417 # with performance impacts. The current code runs more often than needed
2418 2418 # and does not use caches as much as it could. The current focus is on
2419 2419 # the behavior of the feature so we disable it by default. The flag
2420 2420 # will be removed when we are happy with the performance impact.
2421 2421 #
2422 2422 # Once this feature is no longer experimental move the following
2423 2423 # documentation to the appropriate help section:
2424 2424 #
2425 2425 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2426 2426 # tags (new or changed or deleted tags). In addition the details of
2427 2427 # these changes are made available in a file at:
2428 2428 # ``REPOROOT/.hg/changes/tags.changes``.
2429 2429 # Make sure you check for HG_TAG_MOVED before reading that file as it
2430 2430 # might exist from a previous transaction even if no tags were touched
2431 2431 # in this one. Changes are recorded in a line-based format::
2432 2432 #
2433 2433 # <action> <hex-node> <tag-name>\n
2434 2434 #
2435 2435 # Actions are defined as follows:
2436 2436 # "-R": tag is removed,
2437 2437 # "+A": tag is added,
2438 2438 # "-M": tag is moved (old value),
2439 2439 # "+M": tag is moved (new value),
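# Added example of recorded lines (hypothetical, truncated hex nodes):
# +A 1f0e4d... new-tag
# -M 3b2c1a... moving-tag
# +M 7d8e9f... moving-tag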
2440 2440 tracktags = lambda x: None
2441 2441 # experimental config: experimental.hook-track-tags
2442 2442 shouldtracktags = self.ui.configbool(
2443 2443 b'experimental', b'hook-track-tags'
2444 2444 )
2445 2445 if desc != b'strip' and shouldtracktags:
2446 2446 oldheads = self.changelog.headrevs()
2447 2447
2448 2448 def tracktags(tr2):
2449 2449 repo = reporef()
2450 2450 assert repo is not None # help pytype
2451 2451 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2452 2452 newheads = repo.changelog.headrevs()
2453 2453 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2454 2454 # note: we compare lists here.
2455 2455 # As we do it only once, building a set would not be cheaper
2456 2456 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2457 2457 if changes:
2458 2458 tr2.hookargs[b'tag_moved'] = b'1'
2459 2459 with repo.vfs(
2460 2460 b'changes/tags.changes', b'w', atomictemp=True
2461 2461 ) as changesfile:
2462 2462 # note: we do not register the file with the transaction
2463 2463 # because we need it to still exist when the transaction
2464 2464 # is closed (for txnclose hooks)
2465 2465 tagsmod.writediff(changesfile, changes)
2466 2466
2467 2467 def validate(tr2):
2468 2468 """will run pre-closing hooks"""
2469 2469 # XXX the transaction API is a bit lacking here so we take a hacky
2470 2470 # path for now
2471 2471 #
2472 2472 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2473 2473 # dict is copied before these run. In addition we need the data
2474 2474 # available to in-memory hooks too.
2475 2475 #
2476 2476 # Moreover, we also need to make sure this runs before txnclose
2477 2477 # hooks and there is no "pending" mechanism that would execute
2478 2478 # logic only if hooks are about to run.
2479 2479 #
2480 2480 # Fixing this limitation of the transaction is also needed to track
2481 2481 # other families of changes (bookmarks, phases, obsolescence).
2482 2482 #
2483 2483 # This will have to be fixed before we remove the experimental
2484 2484 # gating.
2485 2485 tracktags(tr2)
2486 2486 repo = reporef()
2487 2487 assert repo is not None # help pytype
2488 2488
2489 2489 singleheadopt = (b'experimental', b'single-head-per-branch')
2490 2490 singlehead = repo.ui.configbool(*singleheadopt)
2491 2491 if singlehead:
2492 2492 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2493 2493 accountclosed = singleheadsub.get(
2494 2494 b"account-closed-heads", False
2495 2495 )
2496 2496 if singleheadsub.get(b"public-changes-only", False):
2497 2497 filtername = b"immutable"
2498 2498 else:
2499 2499 filtername = b"visible"
2500 2500 scmutil.enforcesinglehead(
2501 2501 repo, tr2, desc, accountclosed, filtername
2502 2502 )
2503 2503 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2504 2504 for name, (old, new) in sorted(
2505 2505 tr.changes[b'bookmarks'].items()
2506 2506 ):
2507 2507 args = tr.hookargs.copy()
2508 2508 args.update(bookmarks.preparehookargs(name, old, new))
2509 2509 repo.hook(
2510 2510 b'pretxnclose-bookmark',
2511 2511 throw=True,
2512 2512 **pycompat.strkwargs(args)
2513 2513 )
2514 2514 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2515 2515 cl = repo.unfiltered().changelog
2516 2516 for revs, (old, new) in tr.changes[b'phases']:
2517 2517 for rev in revs:
2518 2518 args = tr.hookargs.copy()
2519 2519 node = hex(cl.node(rev))
2520 2520 args.update(phases.preparehookargs(node, old, new))
2521 2521 repo.hook(
2522 2522 b'pretxnclose-phase',
2523 2523 throw=True,
2524 2524 **pycompat.strkwargs(args)
2525 2525 )
2526 2526
2527 2527 repo.hook(
2528 2528 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2529 2529 )
2530 2530
2531 2531 def releasefn(tr, success):
2532 2532 repo = reporef()
2533 2533 if repo is None:
2534 2534 # If the repo has been GC'd (and this release function is being
2535 2535 # called from transaction.__del__), there's not much we can do,
2536 2536 # so just leave the unfinished transaction there and let the
2537 2537 # user run `hg recover`.
2538 2538 return
2539 2539 if success:
2540 2540 # this should be explicitly invoked here, because
2541 2541 # in-memory changes aren't written out when closing the
2542 2542 # transaction, if tr.addfilegenerator (via
2543 2543 # dirstate.write or so) isn't invoked while the
2544 2544 # transaction is running
2545 2545 repo.dirstate.write(None)
2546 2546 else:
2547 2547 # discard all changes (including ones already written
2548 2548 # out) in this transaction
2549 2549 narrowspec.restorebackup(self, b'journal.narrowspec')
2550 2550 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2551 2551 if repo.currentwlock() is not None:
2552 2552 repo.dirstate.restorebackup(None, b'journal.dirstate')
2553 2553
2554 2554 repo.invalidate(clearfilecache=True)
2555 2555
2556 2556 tr = transaction.transaction(
2557 2557 rp,
2558 2558 self.svfs,
2559 2559 vfsmap,
2560 2560 b"journal",
2561 2561 b"undo",
2562 2562 aftertrans(renames),
2563 2563 self.store.createmode,
2564 2564 validator=validate,
2565 2565 releasefn=releasefn,
2566 2566 checkambigfiles=_cachedfiles,
2567 2567 name=desc,
2568 2568 )
2569 2569 tr.changes[b'origrepolen'] = len(self)
2570 2570 tr.changes[b'obsmarkers'] = set()
2571 2571 tr.changes[b'phases'] = []
2572 2572 tr.changes[b'bookmarks'] = {}
2573 2573
2574 2574 tr.hookargs[b'txnid'] = txnid
2575 2575 tr.hookargs[b'txnname'] = desc
2576 2576 tr.hookargs[b'changes'] = tr.changes
2577 2577 # note: writing the fncache only during finalize means that the file is
2578 2578 # outdated when running hooks. As fncache is used for streaming clones,
2579 2579 # this is not expected to break anything that happens during the hooks.
2580 2580 tr.addfinalize(b'flush-fncache', self.store.write)
2581 2581
2582 2582 def txnclosehook(tr2):
2583 2583 """To be run if transaction is successful, will schedule a hook run"""
2584 2584 # Don't reference tr2 in hook() so we don't hold a reference.
2585 2585 # This reduces memory consumption when there are multiple
2586 2586 # transactions per lock. This can likely go away if issue5045
2587 2587 # fixes the function accumulation.
2588 2588 hookargs = tr2.hookargs
2589 2589
2590 2590 def hookfunc(unused_success):
2591 2591 repo = reporef()
2592 2592 assert repo is not None # help pytype
2593 2593
2594 2594 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2595 2595 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2596 2596 for name, (old, new) in bmchanges:
2597 2597 args = tr.hookargs.copy()
2598 2598 args.update(bookmarks.preparehookargs(name, old, new))
2599 2599 repo.hook(
2600 2600 b'txnclose-bookmark',
2601 2601 throw=False,
2602 2602 **pycompat.strkwargs(args)
2603 2603 )
2604 2604
2605 2605 if hook.hashook(repo.ui, b'txnclose-phase'):
2606 2606 cl = repo.unfiltered().changelog
2607 2607 phasemv = sorted(
2608 2608 tr.changes[b'phases'], key=lambda r: r[0][0]
2609 2609 )
2610 2610 for revs, (old, new) in phasemv:
2611 2611 for rev in revs:
2612 2612 args = tr.hookargs.copy()
2613 2613 node = hex(cl.node(rev))
2614 2614 args.update(phases.preparehookargs(node, old, new))
2615 2615 repo.hook(
2616 2616 b'txnclose-phase',
2617 2617 throw=False,
2618 2618 **pycompat.strkwargs(args)
2619 2619 )
2620 2620
2621 2621 repo.hook(
2622 2622 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2623 2623 )
2624 2624
2625 2625 repo = reporef()
2626 2626 assert repo is not None # help pytype
2627 2627 repo._afterlock(hookfunc)
2628 2628
2629 2629 tr.addfinalize(b'txnclose-hook', txnclosehook)
2630 2630 # Include a leading "-" to make it happen before the transaction summary
2631 2631 # reports registered via scmutil.registersummarycallback() whose names
2632 2632 # are 00-txnreport etc. That way, the caches will be warm when the
2633 2633 # callbacks run.
2634 2634 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2635 2635
2636 2636 def txnaborthook(tr2):
2637 2637 """To be run if transaction is aborted"""
2638 2638 repo = reporef()
2639 2639 assert repo is not None # help pytype
2640 2640 repo.hook(
2641 2641 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2642 2642 )
2643 2643
2644 2644 tr.addabort(b'txnabort-hook', txnaborthook)
2645 2645 # avoid eager cache invalidation. in-memory data should be identical
2646 2646 # to stored data if transaction has no error.
2647 2647 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2648 2648 self._transref = weakref.ref(tr)
2649 2649 scmutil.registersummarycallback(self, tr, desc)
2650 # This only exists to deal with the need of rollback to have viable
2651 # parents at the end of the operation. So back up viable parents at the
2652 # time of this operation.
2653 #
2654 # We only do it when the `wlock` is taken, otherwise others might be
2655 # altering the dirstate under us.
2656 #
2657 # This is really not a great way to do this (first, because we cannot
2658 # always do it). More viable alternatives exist:
2659 #
2660 # - backing up only the working copy parent in a dedicated file and doing
2661 # a clean "keep-update" to them on `hg rollback`.
2662 #
2663 # - slightly changing the behavior and applying logic similar to "hg
2664 # strip" to pick a working copy destination on `hg rollback`
2665 if self.currentwlock() is not None:
2666 ds = self.dirstate
2667
2668 def backup_dirstate(tr):
2669 for f in ds.all_file_names():
2670 # hardlink backup is okay because `dirstate` is always
2671 # atomically written and possible data files are append-only
2672 # and resistant to trailing data.
2673 tr.addbackup(f, hardlink=True, location=b'plain')
2674
2675 tr.addvalidator(b'dirstate-backup', backup_dirstate)
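# Added descriptive note (hedged reading of the code above): since the
# backup is registered as a transaction validator, it runs when the
# transaction is being closed, so the hardlinked copies reflect the
# dirstate files as they exist at the end of the transaction.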
2650 2676 return tr
2651 2677
2652 2678 def _journalfiles(self):
2653 2679 first = (
2654 2680 (self.svfs, b'journal'),
2655 2681 (self.svfs, b'journal.narrowspec'),
2656 2682 (self.vfs, b'journal.narrowspec.dirstate'),
2657 2683 (self.vfs, b'journal.dirstate'),
2658 2684 )
2659 2685 middle = []
2660 2686 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2661 2687 if dirstate_data is not None:
2662 2688 middle.append((self.vfs, dirstate_data))
2663 2689 end = (
2664 2690 (self.vfs, b'journal.branch'),
2665 2691 (self.vfs, b'journal.desc'),
2666 2692 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2667 2693 (self.svfs, b'journal.phaseroots'),
2668 2694 )
2669 2695 return first + tuple(middle) + end
2670 2696
2671 2697 def undofiles(self):
2672 2698 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2673 2699
2674 2700 @unfilteredmethod
2675 2701 def _writejournal(self, desc):
2676 2702 if self.currentwlock() is not None:
2677 2703 self.dirstate.savebackup(None, b'journal.dirstate')
2678 2704 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2679 2705 narrowspec.savebackup(self, b'journal.narrowspec')
2680 2706 self.vfs.write(
2681 2707 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2682 2708 )
2683 2709 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2684 2710 bookmarksvfs = bookmarks.bookmarksvfs(self)
2685 2711 bookmarksvfs.write(
2686 2712 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2687 2713 )
2688 2714 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2689 2715
2690 2716 def recover(self):
2691 2717 with self.lock():
2692 2718 if self.svfs.exists(b"journal"):
2693 2719 self.ui.status(_(b"rolling back interrupted transaction\n"))
2694 2720 vfsmap = {
2695 2721 b'': self.svfs,
2696 2722 b'plain': self.vfs,
2697 2723 }
2698 2724 transaction.rollback(
2699 2725 self.svfs,
2700 2726 vfsmap,
2701 2727 b"journal",
2702 2728 self.ui.warn,
2703 2729 checkambigfiles=_cachedfiles,
2704 2730 )
2705 2731 self.invalidate()
2706 2732 return True
2707 2733 else:
2708 2734 self.ui.warn(_(b"no interrupted transaction available\n"))
2709 2735 return False
2710 2736
2711 2737 def rollback(self, dryrun=False, force=False):
2712 2738 wlock = lock = None
2713 2739 try:
2714 2740 wlock = self.wlock()
2715 2741 lock = self.lock()
2716 2742 if self.svfs.exists(b"undo"):
2717 2743 return self._rollback(dryrun, force)
2718 2744 else:
2719 2745 self.ui.warn(_(b"no rollback information available\n"))
2720 2746 return 1
2721 2747 finally:
2722 2748 release(lock, wlock)
2723 2749
2724 2750 @unfilteredmethod # Until we get smarter cache management
2725 2751 def _rollback(self, dryrun, force):
2726 2752 ui = self.ui
2727 2753
2728 2754 parents = self.dirstate.parents()
2729 2755 try:
2730 2756 args = self.vfs.read(b'undo.desc').splitlines()
2731 2757 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2732 2758 if len(args) >= 3:
2733 2759 detail = args[2]
2734 2760 oldtip = oldlen - 1
2735 2761
2736 2762 if detail and ui.verbose:
2737 2763 msg = _(
2738 2764 b'repository tip rolled back to revision %d'
2739 2765 b' (undo %s: %s)\n'
2740 2766 ) % (oldtip, desc, detail)
2741 2767 else:
2742 2768 msg = _(
2743 2769 b'repository tip rolled back to revision %d (undo %s)\n'
2744 2770 ) % (oldtip, desc)
2745 2771 parentgone = any(self[p].rev() > oldtip for p in parents)
2746 2772 except IOError:
2747 2773 msg = _(b'rolling back unknown transaction\n')
2748 2774 desc = None
2749 2775 parentgone = True
2750 2776
2751 2777 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2752 2778 raise error.Abort(
2753 2779 _(
2754 2780 b'rollback of last commit while not checked out '
2755 2781 b'may lose data'
2756 2782 ),
2757 2783 hint=_(b'use -f to force'),
2758 2784 )
2759 2785
2760 2786 ui.status(msg)
2761 2787 if dryrun:
2762 2788 return 0
2763 2789
2764 2790 self.destroying()
2765 2791 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2766 2792 skip_journal_pattern = None
2767 2793 if not parentgone:
2768 2794 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2769 2795 transaction.rollback(
2770 2796 self.svfs,
2771 2797 vfsmap,
2772 2798 b'undo',
2773 2799 ui.warn,
2774 2800 checkambigfiles=_cachedfiles,
2775 2801 skip_journal_pattern=skip_journal_pattern,
2776 2802 )
2777 2803 bookmarksvfs = bookmarks.bookmarksvfs(self)
2778 2804 if bookmarksvfs.exists(b'undo.bookmarks'):
2779 2805 bookmarksvfs.rename(
2780 2806 b'undo.bookmarks', b'bookmarks', checkambig=True
2781 2807 )
2782 2808 if self.svfs.exists(b'undo.phaseroots'):
2783 2809 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2784 2810 self.invalidate()
2785 2811
2786 2812 if parentgone:
2787 2813 narrowspec.restorebackup(self, b'undo.narrowspec')
2788 2814 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2789 2815 self.dirstate.restorebackup(None, b'undo.dirstate')
2790 2816 try:
2791 2817 branch = self.vfs.read(b'undo.branch')
2792 2818 self.dirstate.setbranch(encoding.tolocal(branch))
2793 2819 except IOError:
2794 2820 ui.warn(
2795 2821 _(
2796 2822 b'named branch could not be reset: '
2797 2823 b'current branch is still \'%s\'\n'
2798 2824 )
2799 2825 % self.dirstate.branch()
2800 2826 )
2801 2827
2802 2828 parents = tuple([p.rev() for p in self[None].parents()])
2803 2829 if len(parents) > 1:
2804 2830 ui.status(
2805 2831 _(
2806 2832 b'working directory now based on '
2807 2833 b'revisions %d and %d\n'
2808 2834 )
2809 2835 % parents
2810 2836 )
2811 2837 else:
2812 2838 ui.status(
2813 2839 _(b'working directory now based on revision %d\n') % parents
2814 2840 )
2815 2841 mergestatemod.mergestate.clean(self)
2816 2842
2817 2843 # TODO: if we know which new heads may result from this rollback, pass
2818 2844 # them to destroy(), which will prevent the branchhead cache from being
2819 2845 # invalidated.
2820 2846 self.destroyed()
2821 2847 return 0
2822 2848
2823 2849 def _buildcacheupdater(self, newtransaction):
2824 2850 """called during transaction to build the callback updating cache
2825 2851 Lives on the repository to help extensions that might want to augment
2826 2852 Lives on the repository to help extension who might want to augment
2827 2853 this logic. For this purpose, the created transaction is passed to the
2828 2854 method.
2829 2855 """
2830 2856 # we must avoid cyclic reference between repo and transaction.
2831 2857 reporef = weakref.ref(self)
2832 2858
2833 2859 def updater(tr):
2834 2860 repo = reporef()
2835 2861 assert repo is not None # help pytype
2836 2862 repo.updatecaches(tr)
2837 2863
2838 2864 return updater
2839 2865
2840 2866 @unfilteredmethod
2841 2867 def updatecaches(self, tr=None, full=False, caches=None):
2842 2868 """warm appropriate caches
2843 2869
2844 2870 If this function is called after a transaction closed, the transaction
2845 2871 will be available in the 'tr' argument. This can be used to selectively
2846 2872 update caches relevant to the changes in that transaction.
2847 2873
2848 2874 If 'full' is set, make sure all caches the function knows about have
2849 2875 up-to-date data. Even the ones usually loaded more lazily.
2850 2876
2851 2877 The `full` argument can take a special "post-clone" value. In this case
2852 2878 the cache warming is made after a clone and of the slower cache might
2853 2879 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2854 2880 as we plan for a cleaner way to deal with this for 5.9.
2855 2881 """
2856 2882 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2857 2883 # During strip, many caches are invalid but
2858 2884 # later call to `destroyed` will refresh them.
2859 2885 return
2860 2886
2861 2887 unfi = self.unfiltered()
2862 2888
2863 2889 if full:
2864 2890 msg = (
2865 2891 "`full` argument for `repo.updatecaches` is deprecated\n"
2866 2892 "(use `caches=repository.CACHE_ALL` instead)"
2867 2893 )
2868 2894 self.ui.deprecwarn(msg, b"5.9")
2869 2895 caches = repository.CACHES_ALL
2870 2896 if full == b"post-clone":
2871 2897 caches = repository.CACHES_POST_CLONE
2873 2899 elif caches is None:
2874 2900 caches = repository.CACHES_DEFAULT
2875 2901
2876 2902 if repository.CACHE_BRANCHMAP_SERVED in caches:
2877 2903 if tr is None or tr.changes[b'origrepolen'] < len(self):
2878 2904 # accessing the 'served' branchmap should refresh all the others,
2879 2905 self.ui.debug(b'updating the branch cache\n')
2880 2906 self.filtered(b'served').branchmap()
2881 2907 self.filtered(b'served.hidden').branchmap()
2882 2908 # flush all possibly delayed write.
2883 2909 self._branchcaches.write_delayed(self)
2884 2910
2885 2911 if repository.CACHE_CHANGELOG_CACHE in caches:
2886 2912 self.changelog.update_caches(transaction=tr)
2887 2913
2888 2914 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2889 2915 self.manifestlog.update_caches(transaction=tr)
2890 2916
2891 2917 if repository.CACHE_REV_BRANCH in caches:
2892 2918 rbc = unfi.revbranchcache()
2893 2919 for r in unfi.changelog:
2894 2920 rbc.branchinfo(r)
2895 2921 rbc.write()
2896 2922
2897 2923 if repository.CACHE_FULL_MANIFEST in caches:
2898 2924 # ensure the working copy parents are in the manifestfulltextcache
2899 2925 for ctx in self[b'.'].parents():
2900 2926 ctx.manifest() # accessing the manifest is enough
2901 2927
2902 2928 if repository.CACHE_FILE_NODE_TAGS in caches:
2903 2929 # accessing fnode cache warms the cache
2904 2930 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2905 2931
2906 2932 if repository.CACHE_TAGS_DEFAULT in caches:
2907 2933 # accessing tags warm the cache
2908 2934 self.tags()
2909 2935 if repository.CACHE_TAGS_SERVED in caches:
2910 2936 self.filtered(b'served').tags()
2911 2937
2912 2938 if repository.CACHE_BRANCHMAP_ALL in caches:
2913 2939 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2914 2940 # so we're forcing a write to cause these caches to be warmed up
2915 2941 # even if they haven't explicitly been requested yet (if they've
2916 2942 # never been used by hg, they won't ever have been written, even if
2917 2943 # they're a subset of another kind of cache that *has* been used).
2918 2944 for filt in repoview.filtertable.keys():
2919 2945 filtered = self.filtered(filt)
2920 2946 filtered.branchmap().write(filtered)
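# Hedged usage sketch (not part of the upstream file): callers that only need
# a subset of caches warmed can pass an explicit set instead of the deprecated
# `full` flag, using the same CACHE_* constants referenced above, e.g.:
#
#     from mercurial.interfaces import repository
#
#     repo.updatecaches(caches={repository.CACHE_BRANCHMAP_SERVED,
#                               repository.CACHE_TAGS_DEFAULT})
#
# `repo` is assumed to be a localrepository instance.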
2921 2947
2922 2948 def invalidatecaches(self):
2923 2949 if '_tagscache' in vars(self):
2924 2950 # can't use delattr on proxy
2925 2951 del self.__dict__['_tagscache']
2926 2952
2927 2953 self._branchcaches.clear()
2928 2954 self.invalidatevolatilesets()
2929 2955 self._sparsesignaturecache.clear()
2930 2956
2931 2957 def invalidatevolatilesets(self):
2932 2958 self.filteredrevcache.clear()
2933 2959 obsolete.clearobscaches(self)
2934 2960 self._quick_access_changeid_invalidate()
2935 2961
2936 2962 def invalidatedirstate(self):
2937 2963 """Invalidates the dirstate, causing the next call to dirstate
2938 2964 to check if it was modified since the last time it was read,
2939 2965 rereading it if it has.
2940 2966
2941 2967 This is different from dirstate.invalidate() in that it doesn't always
2942 2968 reread the dirstate. Use dirstate.invalidate() if you want to
2943 2969 explicitly read the dirstate again (i.e. restoring it to a previous
2944 2970 known good state)."""
2945 2971 if hasunfilteredcache(self, 'dirstate'):
2946 2972 for k in self.dirstate._filecache:
2947 2973 try:
2948 2974 delattr(self.dirstate, k)
2949 2975 except AttributeError:
2950 2976 pass
2951 2977 delattr(self.unfiltered(), 'dirstate')
2952 2978
2953 2979 def invalidate(self, clearfilecache=False):
2954 2980 """Invalidates both store and non-store parts other than dirstate
2955 2981
2956 2982 If a transaction is running, invalidation of store is omitted,
2957 2983 because discarding in-memory changes might cause inconsistency
2958 2984 (e.g. incomplete fncache causes unintentional failure, but
2959 2985 redundant one doesn't).
2960 2986 """
2961 2987 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2962 2988 for k in list(self._filecache.keys()):
2963 2989 # dirstate is invalidated separately in invalidatedirstate()
2964 2990 if k == b'dirstate':
2965 2991 continue
2966 2992 if (
2967 2993 k == b'changelog'
2968 2994 and self.currenttransaction()
2969 2995 and self.changelog._delayed
2970 2996 ):
2971 2997 # The changelog object may store unwritten revisions. We don't
2972 2998 # want to lose them.
2973 2999 # TODO: Solve the problem instead of working around it.
2974 3000 continue
2975 3001
2976 3002 if clearfilecache:
2977 3003 del self._filecache[k]
2978 3004 try:
2979 3005 delattr(unfiltered, k)
2980 3006 except AttributeError:
2981 3007 pass
2982 3008 self.invalidatecaches()
2983 3009 if not self.currenttransaction():
2984 3010 # TODO: Changing contents of store outside transaction
2985 3011 # causes inconsistency. We should make in-memory store
2986 3012 # changes detectable, and abort if changed.
2987 3013 self.store.invalidatecaches()
2988 3014
2989 3015 def invalidateall(self):
2990 3016 """Fully invalidates both store and non-store parts, causing the
2991 3017 subsequent operation to reread any outside changes."""
2992 3018 # extension should hook this to invalidate its caches
2993 3019 self.invalidate()
2994 3020 self.invalidatedirstate()
2995 3021
2996 3022 @unfilteredmethod
2997 3023 def _refreshfilecachestats(self, tr):
2998 3024 """Reload stats of cached files so that they are flagged as valid"""
2999 3025 for k, ce in self._filecache.items():
3000 3026 k = pycompat.sysstr(k)
3001 3027 if k == 'dirstate' or k not in self.__dict__:
3002 3028 continue
3003 3029 ce.refresh()
3004 3030
3005 3031 def _lock(
3006 3032 self,
3007 3033 vfs,
3008 3034 lockname,
3009 3035 wait,
3010 3036 releasefn,
3011 3037 acquirefn,
3012 3038 desc,
3013 3039 ):
3014 3040 timeout = 0
3015 3041 warntimeout = 0
3016 3042 if wait:
3017 3043 timeout = self.ui.configint(b"ui", b"timeout")
3018 3044 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3019 3045 # internal config: ui.signal-safe-lock
3020 3046 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3021 3047
3022 3048 l = lockmod.trylock(
3023 3049 self.ui,
3024 3050 vfs,
3025 3051 lockname,
3026 3052 timeout,
3027 3053 warntimeout,
3028 3054 releasefn=releasefn,
3029 3055 acquirefn=acquirefn,
3030 3056 desc=desc,
3031 3057 signalsafe=signalsafe,
3032 3058 )
3033 3059 return l
3034 3060
3035 3061 def _afterlock(self, callback):
3036 3062 """add a callback to be run when the repository is fully unlocked
3037 3063
3038 3064 The callback will be executed when the outermost lock is released
3039 3065 (with wlock being higher level than 'lock')."""
3040 3066 for ref in (self._wlockref, self._lockref):
3041 3067 l = ref and ref()
3042 3068 if l and l.held:
3043 3069 l.postrelease.append(callback)
3044 3070 break
3045 3071 else: # no lock has been found.
3046 3072 callback(True)
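# Hedged sketch (not part of the upstream file): this is the mechanism commit()
# and pushkey() use below to defer their hooks until the outermost lock is
# released. A caller registers a callback taking a single "success" flag:
#
#     def _after_unlock(success):
#         repo.ui.debug(b'all repository locks released\n')
#
#     repo._afterlock(_after_unlock)
#
# If no lock is currently held, the callback runs immediately with True.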
3047 3073
3048 3074 def lock(self, wait=True):
3049 3075 """Lock the repository store (.hg/store) and return a weak reference
3050 3076 to the lock. Use this before modifying the store (e.g. committing or
3051 3077 stripping). If you are opening a transaction, get a lock as well.
3052 3078
3053 3079 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3054 3080 'wlock' first to avoid a dead-lock hazard."""
3055 3081 l = self._currentlock(self._lockref)
3056 3082 if l is not None:
3057 3083 l.lock()
3058 3084 return l
3059 3085
3060 3086 l = self._lock(
3061 3087 vfs=self.svfs,
3062 3088 lockname=b"lock",
3063 3089 wait=wait,
3064 3090 releasefn=None,
3065 3091 acquirefn=self.invalidate,
3066 3092 desc=_(b'repository %s') % self.origroot,
3067 3093 )
3068 3094 self._lockref = weakref.ref(l)
3069 3095 return l
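# Hedged usage sketch (not part of the upstream file): per the docstring above,
# when both locks are needed, take 'wlock' before 'lock', exactly as commit()
# does further down in this file:
#
#     with repo.wlock(), repo.lock():
#         ...  # modify working copy and store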
3070 3096
3071 3097 def wlock(self, wait=True):
3072 3098 """Lock the non-store parts of the repository (everything under
3073 3099 .hg except .hg/store) and return a weak reference to the lock.
3074 3100
3075 3101 Use this before modifying files in .hg.
3076 3102
3077 3103 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3078 3104 'wlock' first to avoid a dead-lock hazard."""
3079 3105 l = self._wlockref() if self._wlockref else None
3080 3106 if l is not None and l.held:
3081 3107 l.lock()
3082 3108 return l
3083 3109
3084 3110 # We do not need to check for non-waiting lock acquisition. Such
3085 3111 # acquisition would not cause dead-lock as it would just fail.
3086 3112 if wait and (
3087 3113 self.ui.configbool(b'devel', b'all-warnings')
3088 3114 or self.ui.configbool(b'devel', b'check-locks')
3089 3115 ):
3090 3116 if self._currentlock(self._lockref) is not None:
3091 3117 self.ui.develwarn(b'"wlock" acquired after "lock"')
3092 3118
3093 3119 def unlock():
3094 3120 if self.dirstate.is_changing_any:
3095 3121 msg = b"wlock release in the middle of a changing parents"
3096 3122 self.ui.develwarn(msg)
3097 3123 self.dirstate.invalidate()
3098 3124 else:
3099 3125 if self.dirstate._dirty:
3100 3126 msg = b"dirty dirstate on wlock release"
3101 3127 self.ui.develwarn(msg)
3102 3128 self.dirstate.write(None)
3103 3129
3104 3130 self._filecache[b'dirstate'].refresh()
3105 3131
3106 3132 l = self._lock(
3107 3133 self.vfs,
3108 3134 b"wlock",
3109 3135 wait,
3110 3136 unlock,
3111 3137 self.invalidatedirstate,
3112 3138 _(b'working directory of %s') % self.origroot,
3113 3139 )
3114 3140 self._wlockref = weakref.ref(l)
3115 3141 return l
3116 3142
3117 3143 def _currentlock(self, lockref):
3118 3144 """Returns the lock if it's held, or None if it's not."""
3119 3145 if lockref is None:
3120 3146 return None
3121 3147 l = lockref()
3122 3148 if l is None or not l.held:
3123 3149 return None
3124 3150 return l
3125 3151
3126 3152 def currentwlock(self):
3127 3153 """Returns the wlock if it's held, or None if it's not."""
3128 3154 return self._currentlock(self._wlockref)
3129 3155
3130 3156 def checkcommitpatterns(self, wctx, match, status, fail):
3131 3157 """check for commit arguments that aren't committable"""
3132 3158 if match.isexact() or match.prefix():
3133 3159 matched = set(status.modified + status.added + status.removed)
3134 3160
3135 3161 for f in match.files():
3136 3162 f = self.dirstate.normalize(f)
3137 3163 if f == b'.' or f in matched or f in wctx.substate:
3138 3164 continue
3139 3165 if f in status.deleted:
3140 3166 fail(f, _(b'file not found!'))
3141 3167 # Is it a directory that exists or used to exist?
3142 3168 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3143 3169 d = f + b'/'
3144 3170 for mf in matched:
3145 3171 if mf.startswith(d):
3146 3172 break
3147 3173 else:
3148 3174 fail(f, _(b"no match under directory!"))
3149 3175 elif f not in self.dirstate:
3150 3176 fail(f, _(b"file not tracked!"))
3151 3177
3152 3178 @unfilteredmethod
3153 3179 def commit(
3154 3180 self,
3155 3181 text=b"",
3156 3182 user=None,
3157 3183 date=None,
3158 3184 match=None,
3159 3185 force=False,
3160 3186 editor=None,
3161 3187 extra=None,
3162 3188 ):
3163 3189 """Add a new revision to current repository.
3164 3190
3165 3191 Revision information is gathered from the working directory,
3166 3192 match can be used to filter the committed files. If editor is
3167 3193 supplied, it is called to get a commit message.
3168 3194 """
3169 3195 if extra is None:
3170 3196 extra = {}
3171 3197
3172 3198 def fail(f, msg):
3173 3199 raise error.InputError(b'%s: %s' % (f, msg))
3174 3200
3175 3201 if not match:
3176 3202 match = matchmod.always()
3177 3203
3178 3204 if not force:
3179 3205 match.bad = fail
3180 3206
3181 3207 # lock() for recent changelog (see issue4368)
3182 3208 with self.wlock(), self.lock():
3183 3209 wctx = self[None]
3184 3210 merge = len(wctx.parents()) > 1
3185 3211
3186 3212 if not force and merge and not match.always():
3187 3213 raise error.Abort(
3188 3214 _(
3189 3215 b'cannot partially commit a merge '
3190 3216 b'(do not specify files or patterns)'
3191 3217 )
3192 3218 )
3193 3219
3194 3220 status = self.status(match=match, clean=force)
3195 3221 if force:
3196 3222 status.modified.extend(
3197 3223 status.clean
3198 3224 ) # mq may commit clean files
3199 3225
3200 3226 # check subrepos
3201 3227 subs, commitsubs, newstate = subrepoutil.precommit(
3202 3228 self.ui, wctx, status, match, force=force
3203 3229 )
3204 3230
3205 3231 # make sure all explicit patterns are matched
3206 3232 if not force:
3207 3233 self.checkcommitpatterns(wctx, match, status, fail)
3208 3234
3209 3235 cctx = context.workingcommitctx(
3210 3236 self, status, text, user, date, extra
3211 3237 )
3212 3238
3213 3239 ms = mergestatemod.mergestate.read(self)
3214 3240 mergeutil.checkunresolved(ms)
3215 3241
3216 3242 # internal config: ui.allowemptycommit
3217 3243 if cctx.isempty() and not self.ui.configbool(
3218 3244 b'ui', b'allowemptycommit'
3219 3245 ):
3220 3246 self.ui.debug(b'nothing to commit, clearing merge state\n')
3221 3247 ms.reset()
3222 3248 return None
3223 3249
3224 3250 if merge and cctx.deleted():
3225 3251 raise error.Abort(_(b"cannot commit merge with missing files"))
3226 3252
3227 3253 if editor:
3228 3254 cctx._text = editor(self, cctx, subs)
3229 3255 edited = text != cctx._text
3230 3256
3231 3257 # Save commit message in case this transaction gets rolled back
3232 3258 # (e.g. by a pretxncommit hook). Leave the content alone on
3233 3259 # the assumption that the user will use the same editor again.
3234 3260 msg_path = self.savecommitmessage(cctx._text)
3235 3261
3236 3262 # commit subs and write new state
3237 3263 if subs:
3238 3264 uipathfn = scmutil.getuipathfn(self)
3239 3265 for s in sorted(commitsubs):
3240 3266 sub = wctx.sub(s)
3241 3267 self.ui.status(
3242 3268 _(b'committing subrepository %s\n')
3243 3269 % uipathfn(subrepoutil.subrelpath(sub))
3244 3270 )
3245 3271 sr = sub.commit(cctx._text, user, date)
3246 3272 newstate[s] = (newstate[s][0], sr)
3247 3273 subrepoutil.writestate(self, newstate)
3248 3274
3249 3275 p1, p2 = self.dirstate.parents()
3250 3276 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3251 3277 try:
3252 3278 self.hook(
3253 3279 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3254 3280 )
3255 3281 with self.transaction(b'commit'):
3256 3282 ret = self.commitctx(cctx, True)
3257 3283 # update bookmarks, dirstate and mergestate
3258 3284 bookmarks.update(self, [p1, p2], ret)
3259 3285 cctx.markcommitted(ret)
3260 3286 ms.reset()
3261 3287 except: # re-raises
3262 3288 if edited:
3263 3289 self.ui.write(
3264 3290 _(b'note: commit message saved in %s\n') % msg_path
3265 3291 )
3266 3292 self.ui.write(
3267 3293 _(
3268 3294 b"note: use 'hg commit --logfile "
3269 3295 b"%s --edit' to reuse it\n"
3270 3296 )
3271 3297 % msg_path
3272 3298 )
3273 3299 raise
3274 3300
3275 3301 def commithook(unused_success):
3276 3302 # hack for command that use a temporary commit (eg: histedit)
3277 3303 # temporary commit got stripped before hook release
3278 3304 if self.changelog.hasnode(ret):
3279 3305 self.hook(
3280 3306 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3281 3307 )
3282 3308
3283 3309 self._afterlock(commithook)
3284 3310 return ret
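# Hedged usage sketch (not part of the upstream file): a minimal programmatic
# commit of all pending working-directory changes, using only arguments from
# the signature above:
#
#     node = repo.commit(text=b"example message", user=b"alice <alice@example.com>")
#     if node is None:
#         repo.ui.status(b"nothing to commit\n")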
3285 3311
3286 3312 @unfilteredmethod
3287 3313 def commitctx(self, ctx, error=False, origctx=None):
3288 3314 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3289 3315
3290 3316 @unfilteredmethod
3291 3317 def destroying(self):
3292 3318 """Inform the repository that nodes are about to be destroyed.
3293 3319 Intended for use by strip and rollback, so there's a common
3294 3320 place for anything that has to be done before destroying history.
3295 3321
3296 3322 This is mostly useful for saving state that is in memory and waiting
3297 3323 to be flushed when the current lock is released. Because a call to
3298 3324 destroyed is imminent, the repo will be invalidated causing those
3299 3325 changes to stay in memory (waiting for the next unlock), or vanish
3300 3326 completely.
3301 3327 """
3302 3328 # When using the same lock to commit and strip, the phasecache is left
3303 3329 # dirty after committing. Then when we strip, the repo is invalidated,
3304 3330 # causing those changes to disappear.
3305 3331 if '_phasecache' in vars(self):
3306 3332 self._phasecache.write()
3307 3333
3308 3334 @unfilteredmethod
3309 3335 def destroyed(self):
3310 3336 """Inform the repository that nodes have been destroyed.
3311 3337 Intended for use by strip and rollback, so there's a common
3312 3338 place for anything that has to be done after destroying history.
3313 3339 """
3314 3340 # When one tries to:
3315 3341 # 1) destroy nodes thus calling this method (e.g. strip)
3316 3342 # 2) use phasecache somewhere (e.g. commit)
3317 3343 #
3318 3344 # then 2) will fail because the phasecache contains nodes that were
3319 3345 # removed. We can either remove phasecache from the filecache,
3320 3346 # causing it to reload next time it is accessed, or simply filter
3321 3347 # the removed nodes now and write the updated cache.
3322 3348 self._phasecache.filterunknown(self)
3323 3349 self._phasecache.write()
3324 3350
3325 3351 # refresh all repository caches
3326 3352 self.updatecaches()
3327 3353
3328 3354 # Ensure the persistent tag cache is updated. Doing it now
3329 3355 # means that the tag cache only has to worry about destroyed
3330 3356 # heads immediately after a strip/rollback. That in turn
3331 3357 # guarantees that "cachetip == currenttip" (comparing both rev
3332 3358 # and node) always means no nodes have been added or destroyed.
3333 3359
3334 3360 # XXX this is suboptimal when qrefresh'ing: we strip the current
3335 3361 # head, refresh the tag cache, then immediately add a new head.
3336 3362 # But I think doing it this way is necessary for the "instant
3337 3363 # tag cache retrieval" case to work.
3338 3364 self.invalidate()
3339 3365
3340 3366 def status(
3341 3367 self,
3342 3368 node1=b'.',
3343 3369 node2=None,
3344 3370 match=None,
3345 3371 ignored=False,
3346 3372 clean=False,
3347 3373 unknown=False,
3348 3374 listsubrepos=False,
3349 3375 ):
3350 3376 '''a convenience method that calls node1.status(node2)'''
3351 3377 return self[node1].status(
3352 3378 node2, match, ignored, clean, unknown, listsubrepos
3353 3379 )
3354 3380
3355 3381 def addpostdsstatus(self, ps):
3356 3382 """Add a callback to run within the wlock, at the point at which status
3357 3383 fixups happen.
3358 3384
3359 3385 On status completion, callback(wctx, status) will be called with the
3360 3386 wlock held, unless the dirstate has changed from underneath or the wlock
3361 3387 couldn't be grabbed.
3362 3388
3363 3389 Callbacks should not capture and use a cached copy of the dirstate --
3364 3390 it might change in the meanwhile. Instead, they should access the
3365 3391 dirstate via wctx.repo().dirstate.
3366 3392
3367 3393 This list is emptied out after each status run -- extensions should
3368 3394 make sure they add to this list each time dirstate.status is called.
3369 3395 Extensions should also make sure they don't call this for statuses
3370 3396 that don't involve the dirstate.
3371 3397 """
3372 3398
3373 3399 # The list is located here for uniqueness reasons -- it is actually
3374 3400 # managed by the workingctx, but that isn't unique per-repo.
3375 3401 self._postdsstatus.append(ps)
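# Hedged sketch (not part of the upstream file): a post-dirstate-status
# callback following the contract described above -- it receives (wctx, status)
# with the wlock held and must reach the dirstate through wctx.repo():
#
#     def _fixup(wctx, status):
#         dirstate = wctx.repo().dirstate
#         ...  # e.g. record fixups for files in status.clean
#
#     repo.addpostdsstatus(_fixup)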
3376 3402
3377 3403 def postdsstatus(self):
3378 3404 """Used by workingctx to get the list of post-dirstate-status hooks."""
3379 3405 return self._postdsstatus
3380 3406
3381 3407 def clearpostdsstatus(self):
3382 3408 """Used by workingctx to clear post-dirstate-status hooks."""
3383 3409 del self._postdsstatus[:]
3384 3410
3385 3411 def heads(self, start=None):
3386 3412 if start is None:
3387 3413 cl = self.changelog
3388 3414 headrevs = reversed(cl.headrevs())
3389 3415 return [cl.node(rev) for rev in headrevs]
3390 3416
3391 3417 heads = self.changelog.heads(start)
3392 3418 # sort the output in rev descending order
3393 3419 return sorted(heads, key=self.changelog.rev, reverse=True)
3394 3420
3395 3421 def branchheads(self, branch=None, start=None, closed=False):
3396 3422 """return a (possibly filtered) list of heads for the given branch
3397 3423
3398 3424 Heads are returned in topological order, from newest to oldest.
3399 3425 If branch is None, use the dirstate branch.
3400 3426 If start is not None, return only heads reachable from start.
3401 3427 If closed is True, return heads that are marked as closed as well.
3402 3428 """
3403 3429 if branch is None:
3404 3430 branch = self[None].branch()
3405 3431 branches = self.branchmap()
3406 3432 if not branches.hasbranch(branch):
3407 3433 return []
3408 3434 # the cache returns heads ordered lowest to highest
3409 3435 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3410 3436 if start is not None:
3411 3437 # filter out the heads that cannot be reached from startrev
3412 3438 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3413 3439 bheads = [h for h in bheads if h in fbheads]
3414 3440 return bheads
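# Hedged usage sketch (not part of the upstream file): list the heads of the
# 'default' branch, including closed ones, newest first:
#
#     heads = repo.branchheads(b'default', closed=True)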
3415 3441
3416 3442 def branches(self, nodes):
3417 3443 if not nodes:
3418 3444 nodes = [self.changelog.tip()]
3419 3445 b = []
3420 3446 for n in nodes:
3421 3447 t = n
3422 3448 while True:
3423 3449 p = self.changelog.parents(n)
3424 3450 if p[1] != self.nullid or p[0] == self.nullid:
3425 3451 b.append((t, n, p[0], p[1]))
3426 3452 break
3427 3453 n = p[0]
3428 3454 return b
3429 3455
3430 3456 def between(self, pairs):
3431 3457 r = []
3432 3458
3433 3459 for top, bottom in pairs:
3434 3460 n, l, i = top, [], 0
3435 3461 f = 1
3436 3462
3437 3463 while n != bottom and n != self.nullid:
3438 3464 p = self.changelog.parents(n)[0]
3439 3465 if i == f:
3440 3466 l.append(n)
3441 3467 f = f * 2
3442 3468 n = p
3443 3469 i += 1
3444 3470
3445 3471 r.append(l)
3446 3472
3447 3473 return r
3448 3474
3449 3475 def checkpush(self, pushop):
3450 3476 """Extensions can override this function if additional checks have
3451 3477 to be performed before pushing, or call it if they override push
3452 3478 command.
3453 3479 """
3454 3480
3455 3481 @unfilteredpropertycache
3456 3482 def prepushoutgoinghooks(self):
3457 3483 """Return a util.hooks instance whose callbacks are invoked with a pushop
3458 3484 (carrying repo, remote and outgoing attributes) before pushing changesets.
3459 3485 """
3460 3486 return util.hooks()
3461 3487
3462 3488 def pushkey(self, namespace, key, old, new):
3463 3489 try:
3464 3490 tr = self.currenttransaction()
3465 3491 hookargs = {}
3466 3492 if tr is not None:
3467 3493 hookargs.update(tr.hookargs)
3468 3494 hookargs = pycompat.strkwargs(hookargs)
3469 3495 hookargs['namespace'] = namespace
3470 3496 hookargs['key'] = key
3471 3497 hookargs['old'] = old
3472 3498 hookargs['new'] = new
3473 3499 self.hook(b'prepushkey', throw=True, **hookargs)
3474 3500 except error.HookAbort as exc:
3475 3501 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3476 3502 if exc.hint:
3477 3503 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3478 3504 return False
3479 3505 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3480 3506 ret = pushkey.push(self, namespace, key, old, new)
3481 3507
3482 3508 def runhook(unused_success):
3483 3509 self.hook(
3484 3510 b'pushkey',
3485 3511 namespace=namespace,
3486 3512 key=key,
3487 3513 old=old,
3488 3514 new=new,
3489 3515 ret=ret,
3490 3516 )
3491 3517
3492 3518 self._afterlock(runhook)
3493 3519 return ret
3494 3520
3495 3521 def listkeys(self, namespace):
3496 3522 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3497 3523 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3498 3524 values = pushkey.list(self, namespace)
3499 3525 self.hook(b'listkeys', namespace=namespace, values=values)
3500 3526 return values
3501 3527
3502 3528 def debugwireargs(self, one, two, three=None, four=None, five=None):
3503 3529 '''used to test argument passing over the wire'''
3504 3530 return b"%s %s %s %s %s" % (
3505 3531 one,
3506 3532 two,
3507 3533 pycompat.bytestr(three),
3508 3534 pycompat.bytestr(four),
3509 3535 pycompat.bytestr(five),
3510 3536 )
3511 3537
3512 3538 def savecommitmessage(self, text):
3513 3539 fp = self.vfs(b'last-message.txt', b'wb')
3514 3540 try:
3515 3541 fp.write(text)
3516 3542 finally:
3517 3543 fp.close()
3518 3544 return self.pathto(fp.name[len(self.root) + 1 :])
3519 3545
3520 3546 def register_wanted_sidedata(self, category):
3521 3547 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3522 3548 # Only revlogv2 repos can want sidedata.
3523 3549 return
3524 3550 self._wanted_sidedata.add(pycompat.bytestr(category))
3525 3551
3526 3552 def register_sidedata_computer(
3527 3553 self, kind, category, keys, computer, flags, replace=False
3528 3554 ):
3529 3555 if kind not in revlogconst.ALL_KINDS:
3530 3556 msg = _(b"unexpected revlog kind '%s'.")
3531 3557 raise error.ProgrammingError(msg % kind)
3532 3558 category = pycompat.bytestr(category)
3533 3559 already_registered = category in self._sidedata_computers.get(kind, [])
3534 3560 if already_registered and not replace:
3535 3561 msg = _(
3536 3562 b"cannot register a sidedata computer twice for category '%s'."
3537 3563 )
3538 3564 raise error.ProgrammingError(msg % category)
3539 3565 if replace and not already_registered:
3540 3566 msg = _(
3541 3567 b"cannot replace a sidedata computer that isn't registered "
3542 3568 b"for category '%s'."
3543 3569 )
3544 3570 raise error.ProgrammingError(msg % category)
3545 3571 self._sidedata_computers.setdefault(kind, {})
3546 3572 self._sidedata_computers[kind][category] = (keys, computer, flags)
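# Hedged sketch (not part of the upstream file): an extension registering a
# sidedata computer for the changelog. KIND_CHANGELOG comes from
# mercurial.revlogutils.constants (imported in this module as revlogconst);
# the computer callable and flags are placeholders whose exact contract is
# defined by the revlog sidedata layer:
#
#     repo.register_wanted_sidedata(b'exp-example')
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG,
#         b'exp-example',
#         (b'exp-example',),
#         example_computer,
#         example_flags,
#     )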
3547 3573
3548 3574
3549 3575 # used to avoid circular references so destructors work
3550 3576 def aftertrans(files):
3551 3577 renamefiles = [tuple(t) for t in files]
3552 3578
3553 3579 def a():
3554 3580 for vfs, src, dest in renamefiles:
3555 3581 # if src and dest refer to a same file, vfs.rename is a no-op,
3556 3582 # leaving both src and dest on disk. delete dest to make sure
3557 3583 # the rename couldn't be such a no-op.
3558 3584 vfs.tryunlink(dest)
3559 3585 try:
3560 3586 vfs.rename(src, dest)
3561 3587 except FileNotFoundError: # journal file does not yet exist
3562 3588 pass
3563 3589
3564 3590 return a
3565 3591
3566 3592
3567 3593 def undoname(fn: bytes) -> bytes:
3568 3594 base, name = os.path.split(fn)
3569 3595 assert name.startswith(b'journal')
3570 3596 return os.path.join(base, name.replace(b'journal', b'undo', 1))
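# Hedged worked example (not part of the upstream file): undoname() only swaps
# the 'journal' prefix of the basename for 'undo', so on a POSIX path:
#
#     undoname(b'.hg/store/journal.phaseroots') == b'.hg/store/undo.phaseroots'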
3571 3597
3572 3598
3573 3599 def instance(ui, path: bytes, create, intents=None, createopts=None):
3574 3600 # prevent cyclic import localrepo -> upgrade -> localrepo
3575 3601 from . import upgrade
3576 3602
3577 3603 localpath = urlutil.urllocalpath(path)
3578 3604 if create:
3579 3605 createrepository(ui, localpath, createopts=createopts)
3580 3606
3581 3607 def repo_maker():
3582 3608 return makelocalrepository(ui, localpath, intents=intents)
3583 3609
3584 3610 repo = repo_maker()
3585 3611 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3586 3612 return repo
3587 3613
3588 3614
3589 3615 def islocal(path: bytes) -> bool:
3590 3616 return True
3591 3617
3592 3618
3593 3619 def defaultcreateopts(ui, createopts=None):
3594 3620 """Populate the default creation options for a repository.
3595 3621
3596 3622 A dictionary of explicitly requested creation options can be passed
3597 3623 in. Missing keys will be populated.
3598 3624 """
3599 3625 createopts = dict(createopts or {})
3600 3626
3601 3627 if b'backend' not in createopts:
3602 3628 # experimental config: storage.new-repo-backend
3603 3629 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3604 3630
3605 3631 return createopts
3606 3632
3607 3633
3608 3634 def clone_requirements(ui, createopts, srcrepo):
3609 3635 """clone the requirements of a local repo for a local clone
3610 3636
3611 3637 The store requirements are unchanged while the working copy requirements
3612 3638 depend on the configuration.
3613 3639 """
3614 3640 target_requirements = set()
3615 3641 if not srcrepo.requirements:
3616 3642 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3617 3643 # with it.
3618 3644 return target_requirements
3619 3645 createopts = defaultcreateopts(ui, createopts=createopts)
3620 3646 for r in newreporequirements(ui, createopts):
3621 3647 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3622 3648 target_requirements.add(r)
3623 3649
3624 3650 for r in srcrepo.requirements:
3625 3651 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3626 3652 target_requirements.add(r)
3627 3653 return target_requirements
3628 3654
3629 3655
3630 3656 def newreporequirements(ui, createopts):
3631 3657 """Determine the set of requirements for a new local repository.
3632 3658
3633 3659 Extensions can wrap this function to specify custom requirements for
3634 3660 new repositories.
3635 3661 """
3636 3662
3637 3663 if b'backend' not in createopts:
3638 3664 raise error.ProgrammingError(
3639 3665 b'backend key not present in createopts; '
3640 3666 b'was defaultcreateopts() called?'
3641 3667 )
3642 3668
3643 3669 if createopts[b'backend'] != b'revlogv1':
3644 3670 raise error.Abort(
3645 3671 _(
3646 3672 b'unable to determine repository requirements for '
3647 3673 b'storage backend: %s'
3648 3674 )
3649 3675 % createopts[b'backend']
3650 3676 )
3651 3677
3652 3678 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3653 3679 if ui.configbool(b'format', b'usestore'):
3654 3680 requirements.add(requirementsmod.STORE_REQUIREMENT)
3655 3681 if ui.configbool(b'format', b'usefncache'):
3656 3682 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3657 3683 if ui.configbool(b'format', b'dotencode'):
3658 3684 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3659 3685
3660 3686 compengines = ui.configlist(b'format', b'revlog-compression')
3661 3687 for compengine in compengines:
3662 3688 if compengine in util.compengines:
3663 3689 engine = util.compengines[compengine]
3664 3690 if engine.available() and engine.revlogheader():
3665 3691 break
3666 3692 else:
3667 3693 raise error.Abort(
3668 3694 _(
3669 3695 b'compression engines %s defined by '
3670 3696 b'format.revlog-compression not available'
3671 3697 )
3672 3698 % b', '.join(b'"%s"' % e for e in compengines),
3673 3699 hint=_(
3674 3700 b'run "hg debuginstall" to list available '
3675 3701 b'compression engines'
3676 3702 ),
3677 3703 )
3678 3704
3679 3705 # zlib is the historical default and doesn't need an explicit requirement.
3680 3706 if compengine == b'zstd':
3681 3707 requirements.add(b'revlog-compression-zstd')
3682 3708 elif compengine != b'zlib':
3683 3709 requirements.add(b'exp-compression-%s' % compengine)
3684 3710
3685 3711 if scmutil.gdinitconfig(ui):
3686 3712 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3687 3713 if ui.configbool(b'format', b'sparse-revlog'):
3688 3714 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3689 3715
3690 3716 # experimental config: format.use-dirstate-v2
3691 3717 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3692 3718 if ui.configbool(b'format', b'use-dirstate-v2'):
3693 3719 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3694 3720
3695 3721 # experimental config: format.exp-use-copies-side-data-changeset
3696 3722 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3697 3723 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3698 3724 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3699 3725 if ui.configbool(b'experimental', b'treemanifest'):
3700 3726 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3701 3727
3702 3728 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3703 3729 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3704 3730 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3705 3731
3706 3732 revlogv2 = ui.config(b'experimental', b'revlogv2')
3707 3733 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3708 3734 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3709 3735 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3710 3736 # experimental config: format.internal-phase
3711 3737 if ui.configbool(b'format', b'use-internal-phase'):
3712 3738 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3713 3739
3714 3740 # experimental config: format.exp-archived-phase
3715 3741 if ui.configbool(b'format', b'exp-archived-phase'):
3716 3742 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3717 3743
3718 3744 if createopts.get(b'narrowfiles'):
3719 3745 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3720 3746
3721 3747 if createopts.get(b'lfs'):
3722 3748 requirements.add(b'lfs')
3723 3749
3724 3750 if ui.configbool(b'format', b'bookmarks-in-store'):
3725 3751 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3726 3752
3727 3753 if ui.configbool(b'format', b'use-persistent-nodemap'):
3728 3754 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3729 3755
3730 3756 # if share-safe is enabled, let's create the new repository with the new
3731 3757 # requirement
3732 3758 if ui.configbool(b'format', b'use-share-safe'):
3733 3759 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3734 3760
3735 3761 # if we are creating a share-repo¹ we have to handle requirements
3736 3762 # differently.
3737 3763 #
3738 3764 # [1] (i.e. reusing the store from another repository, just having a
3739 3765 # working copy)
3740 3766 if b'sharedrepo' in createopts:
3741 3767 source_requirements = set(createopts[b'sharedrepo'].requirements)
3742 3768
3743 3769 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3744 3770 # share to an old school repository, we have to copy the
3745 3771 # requirements and hope for the best.
3746 3772 requirements = source_requirements
3747 3773 else:
3748 3774 # We have control on the working copy only, so "copy" the non
3749 3775 # working copy part over, ignoring previous logic.
3750 3776 to_drop = set()
3751 3777 for req in requirements:
3752 3778 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3753 3779 continue
3754 3780 if req in source_requirements:
3755 3781 continue
3756 3782 to_drop.add(req)
3757 3783 requirements -= to_drop
3758 3784 requirements |= source_requirements
3759 3785
3760 3786 if createopts.get(b'sharedrelative'):
3761 3787 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3762 3788 else:
3763 3789 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3764 3790
3765 3791 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3766 3792 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3767 3793 msg = _(b"ignoring unknown tracked key version: %d\n")
3768 3794 hint = _(
3769 3795 b"see `hg help config.format.use-dirstate-tracked-hint-version`"
3770 3796 )
3771 3797 if version != 1:
3772 3798 ui.warn(msg % version, hint=hint)
3773 3799 else:
3774 3800 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3775 3801
3776 3802 return requirements
3777 3803
3778 3804
3779 3805 def checkrequirementscompat(ui, requirements):
3780 3806 """Checks compatibility of repository requirements enabled and disabled.
3781 3807
3782 3808 Returns a set of requirements which need to be dropped because dependent
3783 3809 requirements are not enabled. Also warns users about it."""
3784 3810
3785 3811 dropped = set()
3786 3812
3787 3813 if requirementsmod.STORE_REQUIREMENT not in requirements:
3788 3814 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3789 3815 ui.warn(
3790 3816 _(
3791 3817 b'ignoring enabled \'format.bookmarks-in-store\' config '
3792 3818 b'because it is incompatible with disabled '
3793 3819 b'\'format.usestore\' config\n'
3794 3820 )
3795 3821 )
3796 3822 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3797 3823
3798 3824 if (
3799 3825 requirementsmod.SHARED_REQUIREMENT in requirements
3800 3826 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3801 3827 ):
3802 3828 raise error.Abort(
3803 3829 _(
3804 3830 b"cannot create shared repository as source was created"
3805 3831 b" with 'format.usestore' config disabled"
3806 3832 )
3807 3833 )
3808 3834
3809 3835 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3810 3836 if ui.hasconfig(b'format', b'use-share-safe'):
3811 3837 msg = _(
3812 3838 b"ignoring enabled 'format.use-share-safe' config because "
3813 3839 b"it is incompatible with disabled 'format.usestore'"
3814 3840 b" config\n"
3815 3841 )
3816 3842 ui.warn(msg)
3817 3843 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3818 3844
3819 3845 return dropped
3820 3846
3821 3847
3822 3848 def filterknowncreateopts(ui, createopts):
3823 3849 """Filters a dict of repo creation options against options that are known.
3824 3850
3825 3851 Receives a dict of repo creation options and returns a dict of those
3826 3852 options that we don't know how to handle.
3827 3853
3828 3854 This function is called as part of repository creation. If the
3829 3855 returned dict contains any items, repository creation will not
3830 3856 be allowed, as it means there was a request to create a repository
3831 3857 with options not recognized by loaded code.
3832 3858
3833 3859 Extensions can wrap this function to filter out creation options
3834 3860 they know how to handle.
3835 3861 """
3836 3862 known = {
3837 3863 b'backend',
3838 3864 b'lfs',
3839 3865 b'narrowfiles',
3840 3866 b'sharedrepo',
3841 3867 b'sharedrelative',
3842 3868 b'shareditems',
3843 3869 b'shallowfilestore',
3844 3870 }
3845 3871
3846 3872 return {k: v for k, v in createopts.items() if k not in known}
3847 3873
3848 3874
3849 3875 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3850 3876 """Create a new repository in a vfs.
3851 3877
3852 3878 ``path`` path to the new repo's working directory.
3853 3879 ``createopts`` options for the new repository.
3854 3880 ``requirements`` predefined set of requirements.
3855 3881 (incompatible with ``createopts``)
3856 3882
3857 3883 The following keys for ``createopts`` are recognized:
3858 3884
3859 3885 backend
3860 3886 The storage backend to use.
3861 3887 lfs
3862 3888 Repository will be created with ``lfs`` requirement. The lfs extension
3863 3889 will automatically be loaded when the repository is accessed.
3864 3890 narrowfiles
3865 3891 Set up repository to support narrow file storage.
3866 3892 sharedrepo
3867 3893 Repository object from which storage should be shared.
3868 3894 sharedrelative
3869 3895 Boolean indicating if the path to the shared repo should be
3870 3896 stored as relative. By default, the pointer to the "parent" repo
3871 3897 is stored as an absolute path.
3872 3898 shareditems
3873 3899 Set of items to share to the new repository (in addition to storage).
3874 3900 shallowfilestore
3875 3901 Indicates that storage for files should be shallow (not all ancestor
3876 3902 revisions are known).
3877 3903 """
3878 3904
3879 3905 if requirements is not None:
3880 3906 if createopts is not None:
3881 3907 msg = b'cannot specify both createopts and requirements'
3882 3908 raise error.ProgrammingError(msg)
3883 3909 createopts = {}
3884 3910 else:
3885 3911 createopts = defaultcreateopts(ui, createopts=createopts)
3886 3912
3887 3913 unknownopts = filterknowncreateopts(ui, createopts)
3888 3914
3889 3915 if not isinstance(unknownopts, dict):
3890 3916 raise error.ProgrammingError(
3891 3917 b'filterknowncreateopts() did not return a dict'
3892 3918 )
3893 3919
3894 3920 if unknownopts:
3895 3921 raise error.Abort(
3896 3922 _(
3897 3923 b'unable to create repository because of unknown '
3898 3924 b'creation option: %s'
3899 3925 )
3900 3926 % b', '.join(sorted(unknownopts)),
3901 3927 hint=_(b'is a required extension not loaded?'),
3902 3928 )
3903 3929
3904 3930 requirements = newreporequirements(ui, createopts=createopts)
3905 3931 requirements -= checkrequirementscompat(ui, requirements)
3906 3932
3907 3933 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3908 3934
3909 3935 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3910 3936 if hgvfs.exists():
3911 3937 raise error.RepoError(_(b'repository %s already exists') % path)
3912 3938
3913 3939 if b'sharedrepo' in createopts:
3914 3940 sharedpath = createopts[b'sharedrepo'].sharedpath
3915 3941
3916 3942 if createopts.get(b'sharedrelative'):
3917 3943 try:
3918 3944 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3919 3945 sharedpath = util.pconvert(sharedpath)
3920 3946 except (IOError, ValueError) as e:
3921 3947 # ValueError is raised on Windows if the drive letters differ
3922 3948 # on each path.
3923 3949 raise error.Abort(
3924 3950 _(b'cannot calculate relative path'),
3925 3951 hint=stringutil.forcebytestr(e),
3926 3952 )
3927 3953
3928 3954 if not wdirvfs.exists():
3929 3955 wdirvfs.makedirs()
3930 3956
3931 3957 hgvfs.makedir(notindexed=True)
3932 3958 if b'sharedrepo' not in createopts:
3933 3959 hgvfs.mkdir(b'cache')
3934 3960 hgvfs.mkdir(b'wcache')
3935 3961
3936 3962 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3937 3963 if has_store and b'sharedrepo' not in createopts:
3938 3964 hgvfs.mkdir(b'store')
3939 3965
3940 3966 # We create an invalid changelog outside the store so very old
3941 3967 # Mercurial versions (which didn't know about the requirements
3942 3968 # file) encounter an error on reading the changelog. This
3943 3969 # effectively locks out old clients and prevents them from
3944 3970 # mucking with a repo in an unknown format.
3945 3971 #
3946 3972 # The revlog header has version 65535, which won't be recognized by
3947 3973 # such old clients.
3948 3974 hgvfs.append(
3949 3975 b'00changelog.i',
3950 3976 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3951 3977 b'layout',
3952 3978 )
3953 3979
3954 3980 # Filter the requirements into working copy and store ones
3955 3981 wcreq, storereq = scmutil.filterrequirements(requirements)
3956 3982 # write working copy ones
3957 3983 scmutil.writerequires(hgvfs, wcreq)
3958 3984 # If there are store requirements and the current repository
3959 3985 # is not a shared one, write the store requirements.
3960 3986 # For a new shared repository, we don't need to write the store
3961 3987 # requirements as they are already present in the store's requires file.
3962 3988 if storereq and b'sharedrepo' not in createopts:
3963 3989 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3964 3990 scmutil.writerequires(storevfs, storereq)
3965 3991
3966 3992 # Write out file telling readers where to find the shared store.
3967 3993 if b'sharedrepo' in createopts:
3968 3994 hgvfs.write(b'sharedpath', sharedpath)
3969 3995
3970 3996 if createopts.get(b'shareditems'):
3971 3997 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3972 3998 hgvfs.write(b'shared', shared)
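# Hedged usage sketch (not part of the upstream file): creating a repository
# with a couple of the createopts keys documented above:
#
#     createrepository(
#         ui,
#         b'/tmp/example-repo',
#         createopts={b'lfs': True, b'narrowfiles': False},
#     )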
3973 3999
3974 4000
3975 4001 def poisonrepository(repo):
3976 4002 """Poison a repository instance so it can no longer be used."""
3977 4003 # Perform any cleanup on the instance.
3978 4004 repo.close()
3979 4005
3980 4006 # Our strategy is to replace the type of the object with one that
3981 4007 # has all attribute lookups result in error.
3982 4008 #
3983 4009 # But we have to allow the close() method because some constructors
3984 4010 # of repos call close() on repo references.
3985 4011 class poisonedrepository:
3986 4012 def __getattribute__(self, item):
3987 4013 if item == 'close':
3988 4014 return object.__getattribute__(self, item)
3989 4015
3990 4016 raise error.ProgrammingError(
3991 4017 b'repo instances should not be used after unshare'
3992 4018 )
3993 4019
3994 4020 def close(self):
3995 4021 pass
3996 4022
3997 4023 # We may have a repoview, which intercepts __setattr__. So be sure
3998 4024 # we operate at the lowest level possible.
3999 4025 object.__setattr__(repo, '__class__', poisonedrepository)
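# Hedged usage sketch (not part of the upstream file): once poisoned, only
# close() keeps working; every other attribute access raises:
#
#     poisonrepository(repo)
#     repo.close()      # still allowed
#     repo.changelog    # raises error.ProgrammingError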