dirstate: make sure the dirstate is loaded before the changelog (issue6303)...
marmoute -
r45359:35b255e4 stable
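The hunk below adds a prefetch_parents() helper whose body only touches self._pl, which forces the dirstate map (and therefore the .hg/dirstate file) to be read. What follows is a minimal, hypothetical sketch of the call pattern the commit message describes; the helper name read_working_copy_state() and its placement are illustration-only assumptions, not part of this change:

    # Hypothetical caller (not the actual Mercurial code): force the
    # dirstate to be read from disk *before* the changelog is consulted,
    # so the two are loaded in a consistent order and a concurrent writer
    # cannot slip in between the two reads (issue6303).
    def read_working_copy_state(repo):
        repo.dirstate.prefetch_parents()  # loads .hg/dirstate and caches the parents
        p1, p2 = repo.dirstate.parents()  # cheap: parents are now in memory
        return repo[p1], repo[p2]         # changelog lookup happens only afterwards

In other words, callers that need both the dirstate and the changelog are expected to call prefetch_parents() up front, rather than letting the dirstate be loaded lazily at some later, racy point.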
@@ -1,1915 +1,1922 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from .pycompat import delattr
19 19
20 20 from hgdemandimport import tracing
21 21
22 22 from . import (
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 txnutil,
32 32 util,
33 33 )
34 34
35 35 from .interfaces import (
36 36 dirstate as intdirstate,
37 37 util as interfaceutil,
38 38 )
39 39
40 40 parsers = policy.importmod('parsers')
41 41 rustmod = policy.importrust('dirstate')
42 42
43 43 propertycache = util.propertycache
44 44 filecache = scmutil.filecache
45 45 _rangemask = 0x7FFFFFFF
46 46
47 47 dirstatetuple = parsers.dirstatetuple
48 48
49 49
50 50 class repocache(filecache):
51 51 """filecache for files in .hg/"""
52 52
53 53 def join(self, obj, fname):
54 54 return obj._opener.join(fname)
55 55
56 56
57 57 class rootcache(filecache):
58 58 """filecache for files in the repository root"""
59 59
60 60 def join(self, obj, fname):
61 61 return obj._join(fname)
62 62
63 63
64 64 def _getfsnow(vfs):
65 65 '''Get "now" timestamp on filesystem'''
66 66 tmpfd, tmpname = vfs.mkstemp()
67 67 try:
68 68 return os.fstat(tmpfd)[stat.ST_MTIME]
69 69 finally:
70 70 os.close(tmpfd)
71 71 vfs.unlink(tmpname)
72 72
73 73
74 74 @interfaceutil.implementer(intdirstate.idirstate)
75 75 class dirstate(object):
76 76 def __init__(self, opener, ui, root, validate, sparsematchfn):
77 77 '''Create a new dirstate object.
78 78
79 79 opener is an open()-like callable that can be used to open the
80 80 dirstate file; root is the root of the directory tracked by
81 81 the dirstate.
82 82 '''
83 83 self._opener = opener
84 84 self._validate = validate
85 85 self._root = root
86 86 self._sparsematchfn = sparsematchfn
 87 87 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is a
 88 88 # UNC path pointing to a root share (issue4557)
89 89 self._rootdir = pathutil.normasprefix(root)
90 90 self._dirty = False
91 91 self._lastnormaltime = 0
92 92 self._ui = ui
93 93 self._filecache = {}
94 94 self._parentwriters = 0
95 95 self._filename = b'dirstate'
96 96 self._pendingfilename = b'%s.pending' % self._filename
97 97 self._plchangecallbacks = {}
98 98 self._origpl = None
99 99 self._updatedfiles = set()
100 100 self._mapcls = dirstatemap
101 101 # Access and cache cwd early, so we don't access it for the first time
102 102 # after a working-copy update caused it to not exist (accessing it then
103 103 # raises an exception).
104 104 self._cwd
105 105
106 def prefetch_parents(self):
107 """make sure the parents are loaded
108
109 Used to avoid a race condition.
110 """
111 self._pl
112
106 113 @contextlib.contextmanager
107 114 def parentchange(self):
108 115 '''Context manager for handling dirstate parents.
109 116
110 117 If an exception occurs in the scope of the context manager,
111 118 the incoherent dirstate won't be written when wlock is
112 119 released.
113 120 '''
114 121 self._parentwriters += 1
115 122 yield
116 123 # Typically we want the "undo" step of a context manager in a
117 124 # finally block so it happens even when an exception
118 125 # occurs. In this case, however, we only want to decrement
119 126 # parentwriters if the code in the with statement exits
120 127 # normally, so we don't have a try/finally here on purpose.
121 128 self._parentwriters -= 1
122 129
123 130 def pendingparentchange(self):
124 131 '''Returns true if the dirstate is in the middle of a set of changes
125 132 that modify the dirstate parent.
126 133 '''
127 134 return self._parentwriters > 0
128 135
129 136 @propertycache
130 137 def _map(self):
131 138 """Return the dirstate contents (see documentation for dirstatemap)."""
132 139 self._map = self._mapcls(self._ui, self._opener, self._root)
133 140 return self._map
134 141
135 142 @property
136 143 def _sparsematcher(self):
137 144 """The matcher for the sparse checkout.
138 145
139 146 The working directory may not include every file from a manifest. The
140 147 matcher obtained by this property will match a path if it is to be
141 148 included in the working directory.
142 149 """
143 150 # TODO there is potential to cache this property. For now, the matcher
144 151 # is resolved on every access. (But the called function does use a
145 152 # cache to keep the lookup fast.)
146 153 return self._sparsematchfn()
147 154
148 155 @repocache(b'branch')
149 156 def _branch(self):
150 157 try:
151 158 return self._opener.read(b"branch").strip() or b"default"
152 159 except IOError as inst:
153 160 if inst.errno != errno.ENOENT:
154 161 raise
155 162 return b"default"
156 163
157 164 @property
158 165 def _pl(self):
159 166 return self._map.parents()
160 167
161 168 def hasdir(self, d):
162 169 return self._map.hastrackeddir(d)
163 170
164 171 @rootcache(b'.hgignore')
165 172 def _ignore(self):
166 173 files = self._ignorefiles()
167 174 if not files:
168 175 return matchmod.never()
169 176
170 177 pats = [b'include:%s' % f for f in files]
171 178 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
172 179
173 180 @propertycache
174 181 def _slash(self):
175 182 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
176 183
177 184 @propertycache
178 185 def _checklink(self):
179 186 return util.checklink(self._root)
180 187
181 188 @propertycache
182 189 def _checkexec(self):
183 190 return util.checkexec(self._root)
184 191
185 192 @propertycache
186 193 def _checkcase(self):
187 194 return not util.fscasesensitive(self._join(b'.hg'))
188 195
189 196 def _join(self, f):
190 197 # much faster than os.path.join()
191 198 # it's safe because f is always a relative path
192 199 return self._rootdir + f
193 200
194 201 def flagfunc(self, buildfallback):
195 202 if self._checklink and self._checkexec:
196 203
197 204 def f(x):
198 205 try:
199 206 st = os.lstat(self._join(x))
200 207 if util.statislink(st):
201 208 return b'l'
202 209 if util.statisexec(st):
203 210 return b'x'
204 211 except OSError:
205 212 pass
206 213 return b''
207 214
208 215 return f
209 216
210 217 fallback = buildfallback()
211 218 if self._checklink:
212 219
213 220 def f(x):
214 221 if os.path.islink(self._join(x)):
215 222 return b'l'
216 223 if b'x' in fallback(x):
217 224 return b'x'
218 225 return b''
219 226
220 227 return f
221 228 if self._checkexec:
222 229
223 230 def f(x):
224 231 if b'l' in fallback(x):
225 232 return b'l'
226 233 if util.isexec(self._join(x)):
227 234 return b'x'
228 235 return b''
229 236
230 237 return f
231 238 else:
232 239 return fallback
233 240
234 241 @propertycache
235 242 def _cwd(self):
236 243 # internal config: ui.forcecwd
237 244 forcecwd = self._ui.config(b'ui', b'forcecwd')
238 245 if forcecwd:
239 246 return forcecwd
240 247 return encoding.getcwd()
241 248
242 249 def getcwd(self):
243 250 '''Return the path from which a canonical path is calculated.
244 251
245 252 This path should be used to resolve file patterns or to convert
246 253 canonical paths back to file paths for display. It shouldn't be
247 254 used to get real file paths. Use vfs functions instead.
248 255 '''
249 256 cwd = self._cwd
250 257 if cwd == self._root:
251 258 return b''
252 259 # self._root ends with a path separator if self._root is '/' or 'C:\'
253 260 rootsep = self._root
254 261 if not util.endswithsep(rootsep):
255 262 rootsep += pycompat.ossep
256 263 if cwd.startswith(rootsep):
257 264 return cwd[len(rootsep) :]
258 265 else:
259 266 # we're outside the repo. return an absolute path.
260 267 return cwd
261 268
262 269 def pathto(self, f, cwd=None):
263 270 if cwd is None:
264 271 cwd = self.getcwd()
265 272 path = util.pathto(self._root, cwd, f)
266 273 if self._slash:
267 274 return util.pconvert(path)
268 275 return path
269 276
270 277 def __getitem__(self, key):
271 278 '''Return the current state of key (a filename) in the dirstate.
272 279
273 280 States are:
274 281 n normal
275 282 m needs merging
276 283 r marked for removal
277 284 a marked for addition
278 285 ? not tracked
279 286 '''
280 287 return self._map.get(key, (b"?",))[0]
281 288
282 289 def __contains__(self, key):
283 290 return key in self._map
284 291
285 292 def __iter__(self):
286 293 return iter(sorted(self._map))
287 294
288 295 def items(self):
289 296 return pycompat.iteritems(self._map)
290 297
291 298 iteritems = items
292 299
293 300 def parents(self):
294 301 return [self._validate(p) for p in self._pl]
295 302
296 303 def p1(self):
297 304 return self._validate(self._pl[0])
298 305
299 306 def p2(self):
300 307 return self._validate(self._pl[1])
301 308
302 309 def branch(self):
303 310 return encoding.tolocal(self._branch)
304 311
305 312 def setparents(self, p1, p2=nullid):
306 313 """Set dirstate parents to p1 and p2.
307 314
 308 315 When moving from two parents to one, 'm' merged entries are
 309 316 adjusted to normal, and previous copy records are discarded and
 310 317 returned by the call.
311 318
312 319 See localrepo.setparents()
313 320 """
314 321 if self._parentwriters == 0:
315 322 raise ValueError(
316 323 b"cannot set dirstate parent outside of "
317 324 b"dirstate.parentchange context manager"
318 325 )
319 326
320 327 self._dirty = True
321 328 oldp2 = self._pl[1]
322 329 if self._origpl is None:
323 330 self._origpl = self._pl
324 331 self._map.setparents(p1, p2)
325 332 copies = {}
326 333 if oldp2 != nullid and p2 == nullid:
327 334 candidatefiles = self._map.nonnormalset.union(
328 335 self._map.otherparentset
329 336 )
330 337 for f in candidatefiles:
331 338 s = self._map.get(f)
332 339 if s is None:
333 340 continue
334 341
335 342 # Discard 'm' markers when moving away from a merge state
336 343 if s[0] == b'm':
337 344 source = self._map.copymap.get(f)
338 345 if source:
339 346 copies[f] = source
340 347 self.normallookup(f)
341 348 # Also fix up otherparent markers
342 349 elif s[0] == b'n' and s[2] == -2:
343 350 source = self._map.copymap.get(f)
344 351 if source:
345 352 copies[f] = source
346 353 self.add(f)
347 354 return copies
348 355
349 356 def setbranch(self, branch):
350 357 self.__class__._branch.set(self, encoding.fromlocal(branch))
351 358 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
352 359 try:
353 360 f.write(self._branch + b'\n')
354 361 f.close()
355 362
356 363 # make sure filecache has the correct stat info for _branch after
357 364 # replacing the underlying file
358 365 ce = self._filecache[b'_branch']
359 366 if ce:
360 367 ce.refresh()
361 368 except: # re-raises
362 369 f.discard()
363 370 raise
364 371
365 372 def invalidate(self):
366 373 '''Causes the next access to reread the dirstate.
367 374
368 375 This is different from localrepo.invalidatedirstate() because it always
369 376 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
370 377 check whether the dirstate has changed before rereading it.'''
371 378
372 379 for a in ("_map", "_branch", "_ignore"):
373 380 if a in self.__dict__:
374 381 delattr(self, a)
375 382 self._lastnormaltime = 0
376 383 self._dirty = False
377 384 self._updatedfiles.clear()
378 385 self._parentwriters = 0
379 386 self._origpl = None
380 387
381 388 def copy(self, source, dest):
382 389 """Mark dest as a copy of source. Unmark dest if source is None."""
383 390 if source == dest:
384 391 return
385 392 self._dirty = True
386 393 if source is not None:
387 394 self._map.copymap[dest] = source
388 395 self._updatedfiles.add(source)
389 396 self._updatedfiles.add(dest)
390 397 elif self._map.copymap.pop(dest, None):
391 398 self._updatedfiles.add(dest)
392 399
393 400 def copied(self, file):
394 401 return self._map.copymap.get(file, None)
395 402
396 403 def copies(self):
397 404 return self._map.copymap
398 405
399 406 def _addpath(self, f, state, mode, size, mtime):
400 407 oldstate = self[f]
401 408 if state == b'a' or oldstate == b'r':
402 409 scmutil.checkfilename(f)
403 410 if self._map.hastrackeddir(f):
404 411 raise error.Abort(
405 412 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
406 413 )
407 414 # shadows
408 415 for d in pathutil.finddirs(f):
409 416 if self._map.hastrackeddir(d):
410 417 break
411 418 entry = self._map.get(d)
412 419 if entry is not None and entry[0] != b'r':
413 420 raise error.Abort(
414 421 _(b'file %r in dirstate clashes with %r')
415 422 % (pycompat.bytestr(d), pycompat.bytestr(f))
416 423 )
417 424 self._dirty = True
418 425 self._updatedfiles.add(f)
419 426 self._map.addfile(f, oldstate, state, mode, size, mtime)
420 427
421 428 def normal(self, f, parentfiledata=None):
422 429 '''Mark a file normal and clean.
423 430
424 431 parentfiledata: (mode, size, mtime) of the clean file
425 432
 426 433 parentfiledata should be computed from memory (for mode and
 427 434 size), at or as close as possible to the point where we
 428 435 determined the file was clean, to limit the risk of the
 429 436 file having been changed by an external process between the
 430 437 moment it was determined to be clean and now.'''
431 438 if parentfiledata:
432 439 (mode, size, mtime) = parentfiledata
433 440 else:
434 441 s = os.lstat(self._join(f))
435 442 mode = s.st_mode
436 443 size = s.st_size
437 444 mtime = s[stat.ST_MTIME]
438 445 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
439 446 self._map.copymap.pop(f, None)
440 447 if f in self._map.nonnormalset:
441 448 self._map.nonnormalset.remove(f)
442 449 if mtime > self._lastnormaltime:
443 450 # Remember the most recent modification timeslot for status(),
444 451 # to make sure we won't miss future size-preserving file content
445 452 # modifications that happen within the same timeslot.
446 453 self._lastnormaltime = mtime
447 454
448 455 def normallookup(self, f):
449 456 '''Mark a file normal, but possibly dirty.'''
450 457 if self._pl[1] != nullid:
451 458 # if there is a merge going on and the file was either
452 459 # in state 'm' (-1) or coming from other parent (-2) before
453 460 # being removed, restore that state.
454 461 entry = self._map.get(f)
455 462 if entry is not None:
456 463 if entry[0] == b'r' and entry[2] in (-1, -2):
457 464 source = self._map.copymap.get(f)
458 465 if entry[2] == -1:
459 466 self.merge(f)
460 467 elif entry[2] == -2:
461 468 self.otherparent(f)
462 469 if source:
463 470 self.copy(source, f)
464 471 return
465 472 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
466 473 return
467 474 self._addpath(f, b'n', 0, -1, -1)
468 475 self._map.copymap.pop(f, None)
469 476
470 477 def otherparent(self, f):
471 478 '''Mark as coming from the other parent, always dirty.'''
472 479 if self._pl[1] == nullid:
473 480 raise error.Abort(
474 481 _(b"setting %r to other parent only allowed in merges") % f
475 482 )
476 483 if f in self and self[f] == b'n':
477 484 # merge-like
478 485 self._addpath(f, b'm', 0, -2, -1)
479 486 else:
480 487 # add-like
481 488 self._addpath(f, b'n', 0, -2, -1)
482 489 self._map.copymap.pop(f, None)
483 490
484 491 def add(self, f):
485 492 '''Mark a file added.'''
486 493 self._addpath(f, b'a', 0, -1, -1)
487 494 self._map.copymap.pop(f, None)
488 495
489 496 def remove(self, f):
490 497 '''Mark a file removed.'''
491 498 self._dirty = True
492 499 oldstate = self[f]
493 500 size = 0
494 501 if self._pl[1] != nullid:
495 502 entry = self._map.get(f)
496 503 if entry is not None:
497 504 # backup the previous state
498 505 if entry[0] == b'm': # merge
499 506 size = -1
500 507 elif entry[0] == b'n' and entry[2] == -2: # other parent
501 508 size = -2
502 509 self._map.otherparentset.add(f)
503 510 self._updatedfiles.add(f)
504 511 self._map.removefile(f, oldstate, size)
505 512 if size == 0:
506 513 self._map.copymap.pop(f, None)
507 514
508 515 def merge(self, f):
509 516 '''Mark a file merged.'''
510 517 if self._pl[1] == nullid:
511 518 return self.normallookup(f)
512 519 return self.otherparent(f)
513 520
514 521 def drop(self, f):
515 522 '''Drop a file from the dirstate'''
516 523 oldstate = self[f]
517 524 if self._map.dropfile(f, oldstate):
518 525 self._dirty = True
519 526 self._updatedfiles.add(f)
520 527 self._map.copymap.pop(f, None)
521 528
522 529 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
523 530 if exists is None:
524 531 exists = os.path.lexists(os.path.join(self._root, path))
525 532 if not exists:
526 533 # Maybe a path component exists
527 534 if not ignoremissing and b'/' in path:
528 535 d, f = path.rsplit(b'/', 1)
529 536 d = self._normalize(d, False, ignoremissing, None)
530 537 folded = d + b"/" + f
531 538 else:
532 539 # No path components, preserve original case
533 540 folded = path
534 541 else:
535 542 # recursively normalize leading directory components
536 543 # against dirstate
537 544 if b'/' in normed:
538 545 d, f = normed.rsplit(b'/', 1)
539 546 d = self._normalize(d, False, ignoremissing, True)
540 547 r = self._root + b"/" + d
541 548 folded = d + b"/" + util.fspath(f, r)
542 549 else:
543 550 folded = util.fspath(normed, self._root)
544 551 storemap[normed] = folded
545 552
546 553 return folded
547 554
548 555 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
549 556 normed = util.normcase(path)
550 557 folded = self._map.filefoldmap.get(normed, None)
551 558 if folded is None:
552 559 if isknown:
553 560 folded = path
554 561 else:
555 562 folded = self._discoverpath(
556 563 path, normed, ignoremissing, exists, self._map.filefoldmap
557 564 )
558 565 return folded
559 566
560 567 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
561 568 normed = util.normcase(path)
562 569 folded = self._map.filefoldmap.get(normed, None)
563 570 if folded is None:
564 571 folded = self._map.dirfoldmap.get(normed, None)
565 572 if folded is None:
566 573 if isknown:
567 574 folded = path
568 575 else:
569 576 # store discovered result in dirfoldmap so that future
570 577 # normalizefile calls don't start matching directories
571 578 folded = self._discoverpath(
572 579 path, normed, ignoremissing, exists, self._map.dirfoldmap
573 580 )
574 581 return folded
575 582
576 583 def normalize(self, path, isknown=False, ignoremissing=False):
577 584 '''
578 585 normalize the case of a pathname when on a casefolding filesystem
579 586
580 587 isknown specifies whether the filename came from walking the
581 588 disk, to avoid extra filesystem access.
582 589
 583 590 If ignoremissing is True, missing paths are returned
584 591 unchanged. Otherwise, we try harder to normalize possibly
585 592 existing path components.
586 593
587 594 The normalized case is determined based on the following precedence:
588 595
589 596 - version of name already stored in the dirstate
590 597 - version of name stored on disk
591 598 - version provided via command arguments
592 599 '''
593 600
594 601 if self._checkcase:
595 602 return self._normalize(path, isknown, ignoremissing)
596 603 return path
597 604
598 605 def clear(self):
599 606 self._map.clear()
600 607 self._lastnormaltime = 0
601 608 self._updatedfiles.clear()
602 609 self._dirty = True
603 610
604 611 def rebuild(self, parent, allfiles, changedfiles=None):
605 612 if changedfiles is None:
606 613 # Rebuild entire dirstate
607 614 to_lookup = allfiles
608 615 to_drop = []
609 616 lastnormaltime = self._lastnormaltime
610 617 self.clear()
611 618 self._lastnormaltime = lastnormaltime
612 619 elif len(changedfiles) < 10:
613 620 # Avoid turning allfiles into a set, which can be expensive if it's
614 621 # large.
615 622 to_lookup = []
616 623 to_drop = []
617 624 for f in changedfiles:
618 625 if f in allfiles:
619 626 to_lookup.append(f)
620 627 else:
621 628 to_drop.append(f)
622 629 else:
623 630 changedfilesset = set(changedfiles)
624 631 to_lookup = changedfilesset & set(allfiles)
625 632 to_drop = changedfilesset - to_lookup
626 633
627 634 if self._origpl is None:
628 635 self._origpl = self._pl
629 636 self._map.setparents(parent, nullid)
630 637
631 638 for f in to_lookup:
632 639 self.normallookup(f)
633 640 for f in to_drop:
634 641 self.drop(f)
635 642
636 643 self._dirty = True
637 644
638 645 def identity(self):
 639 646 '''Return identity of dirstate itself to detect changes in storage
 640 647
 641 648 If the identity of the previous dirstate equals this one, writing
 642 649 out changes based on the former dirstate can preserve consistency.
 643 650 '''
644 651 return self._map.identity
645 652
646 653 def write(self, tr):
647 654 if not self._dirty:
648 655 return
649 656
650 657 filename = self._filename
651 658 if tr:
652 659 # 'dirstate.write()' is not only for writing in-memory
 653 660 # changes out, but also for dropping ambiguous timestamps.
 654 661 # Delayed writing would re-raise the "ambiguous timestamp issue".
655 662 # See also the wiki page below for detail:
656 663 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
657 664
658 665 # emulate dropping timestamp in 'parsers.pack_dirstate'
659 666 now = _getfsnow(self._opener)
660 667 self._map.clearambiguoustimes(self._updatedfiles, now)
661 668
662 669 # emulate that all 'dirstate.normal' results are written out
663 670 self._lastnormaltime = 0
664 671 self._updatedfiles.clear()
665 672
666 673 # delay writing in-memory changes out
667 674 tr.addfilegenerator(
668 675 b'dirstate',
669 676 (self._filename,),
670 677 self._writedirstate,
671 678 location=b'plain',
672 679 )
673 680 return
674 681
675 682 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
676 683 self._writedirstate(st)
677 684
678 685 def addparentchangecallback(self, category, callback):
679 686 """add a callback to be called when the wd parents are changed
680 687
681 688 Callback will be called with the following arguments:
682 689 dirstate, (oldp1, oldp2), (newp1, newp2)
683 690
684 691 Category is a unique identifier to allow overwriting an old callback
685 692 with a newer callback.
686 693 """
687 694 self._plchangecallbacks[category] = callback
688 695
689 696 def _writedirstate(self, st):
690 697 # notify callbacks about parents change
691 698 if self._origpl is not None and self._origpl != self._pl:
692 699 for c, callback in sorted(
693 700 pycompat.iteritems(self._plchangecallbacks)
694 701 ):
695 702 callback(self, self._origpl, self._pl)
696 703 self._origpl = None
697 704 # use the modification time of the newly created temporary file as the
698 705 # filesystem's notion of 'now'
699 706 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
700 707
 701 708 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
 702 709 # the timestamp of each entry in the dirstate, because of 'now > mtime'
703 710 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
704 711 if delaywrite > 0:
705 712 # do we have any files to delay for?
706 713 for f, e in pycompat.iteritems(self._map):
707 714 if e[0] == b'n' and e[3] == now:
708 715 import time # to avoid useless import
709 716
710 717 # rather than sleep n seconds, sleep until the next
711 718 # multiple of n seconds
712 719 clock = time.time()
713 720 start = int(clock) - (int(clock) % delaywrite)
714 721 end = start + delaywrite
715 722 time.sleep(end - clock)
716 723 now = end # trust our estimate that the end is near now
717 724 break
718 725
719 726 self._map.write(st, now)
720 727 self._lastnormaltime = 0
721 728 self._dirty = False
722 729
723 730 def _dirignore(self, f):
724 731 if self._ignore(f):
725 732 return True
726 733 for p in pathutil.finddirs(f):
727 734 if self._ignore(p):
728 735 return True
729 736 return False
730 737
731 738 def _ignorefiles(self):
732 739 files = []
733 740 if os.path.exists(self._join(b'.hgignore')):
734 741 files.append(self._join(b'.hgignore'))
735 742 for name, path in self._ui.configitems(b"ui"):
736 743 if name == b'ignore' or name.startswith(b'ignore.'):
737 744 # we need to use os.path.join here rather than self._join
738 745 # because path is arbitrary and user-specified
739 746 files.append(os.path.join(self._rootdir, util.expandpath(path)))
740 747 return files
741 748
742 749 def _ignorefileandline(self, f):
743 750 files = collections.deque(self._ignorefiles())
744 751 visited = set()
745 752 while files:
746 753 i = files.popleft()
747 754 patterns = matchmod.readpatternfile(
748 755 i, self._ui.warn, sourceinfo=True
749 756 )
750 757 for pattern, lineno, line in patterns:
751 758 kind, p = matchmod._patsplit(pattern, b'glob')
752 759 if kind == b"subinclude":
753 760 if p not in visited:
754 761 files.append(p)
755 762 continue
756 763 m = matchmod.match(
757 764 self._root, b'', [], [pattern], warn=self._ui.warn
758 765 )
759 766 if m(f):
760 767 return (i, lineno, line)
761 768 visited.add(i)
762 769 return (None, -1, b"")
763 770
764 771 def _walkexplicit(self, match, subrepos):
765 772 '''Get stat data about the files explicitly specified by match.
766 773
767 774 Return a triple (results, dirsfound, dirsnotfound).
768 775 - results is a mapping from filename to stat result. It also contains
769 776 listings mapping subrepos and .hg to None.
770 777 - dirsfound is a list of files found to be directories.
771 778 - dirsnotfound is a list of files that the dirstate thinks are
772 779 directories and that were not found.'''
773 780
774 781 def badtype(mode):
775 782 kind = _(b'unknown')
776 783 if stat.S_ISCHR(mode):
777 784 kind = _(b'character device')
778 785 elif stat.S_ISBLK(mode):
779 786 kind = _(b'block device')
780 787 elif stat.S_ISFIFO(mode):
781 788 kind = _(b'fifo')
782 789 elif stat.S_ISSOCK(mode):
783 790 kind = _(b'socket')
784 791 elif stat.S_ISDIR(mode):
785 792 kind = _(b'directory')
786 793 return _(b'unsupported file type (type is %s)') % kind
787 794
788 795 badfn = match.bad
789 796 dmap = self._map
790 797 lstat = os.lstat
791 798 getkind = stat.S_IFMT
792 799 dirkind = stat.S_IFDIR
793 800 regkind = stat.S_IFREG
794 801 lnkkind = stat.S_IFLNK
795 802 join = self._join
796 803 dirsfound = []
797 804 foundadd = dirsfound.append
798 805 dirsnotfound = []
799 806 notfoundadd = dirsnotfound.append
800 807
801 808 if not match.isexact() and self._checkcase:
802 809 normalize = self._normalize
803 810 else:
804 811 normalize = None
805 812
806 813 files = sorted(match.files())
807 814 subrepos.sort()
808 815 i, j = 0, 0
809 816 while i < len(files) and j < len(subrepos):
810 817 subpath = subrepos[j] + b"/"
811 818 if files[i] < subpath:
812 819 i += 1
813 820 continue
814 821 while i < len(files) and files[i].startswith(subpath):
815 822 del files[i]
816 823 j += 1
817 824
818 825 if not files or b'' in files:
819 826 files = [b'']
820 827 # constructing the foldmap is expensive, so don't do it for the
821 828 # common case where files is ['']
822 829 normalize = None
823 830 results = dict.fromkeys(subrepos)
824 831 results[b'.hg'] = None
825 832
826 833 for ff in files:
827 834 if normalize:
828 835 nf = normalize(ff, False, True)
829 836 else:
830 837 nf = ff
831 838 if nf in results:
832 839 continue
833 840
834 841 try:
835 842 st = lstat(join(nf))
836 843 kind = getkind(st.st_mode)
837 844 if kind == dirkind:
838 845 if nf in dmap:
839 846 # file replaced by dir on disk but still in dirstate
840 847 results[nf] = None
841 848 foundadd((nf, ff))
842 849 elif kind == regkind or kind == lnkkind:
843 850 results[nf] = st
844 851 else:
845 852 badfn(ff, badtype(kind))
846 853 if nf in dmap:
847 854 results[nf] = None
848 855 except OSError as inst: # nf not found on disk - it is dirstate only
849 856 if nf in dmap: # does it exactly match a missing file?
850 857 results[nf] = None
851 858 else: # does it match a missing directory?
852 859 if self._map.hasdir(nf):
853 860 notfoundadd(nf)
854 861 else:
855 862 badfn(ff, encoding.strtolocal(inst.strerror))
856 863
857 864 # match.files() may contain explicitly-specified paths that shouldn't
858 865 # be taken; drop them from the list of files found. dirsfound/notfound
859 866 # aren't filtered here because they will be tested later.
860 867 if match.anypats():
861 868 for f in list(results):
862 869 if f == b'.hg' or f in subrepos:
863 870 # keep sentinel to disable further out-of-repo walks
864 871 continue
865 872 if not match(f):
866 873 del results[f]
867 874
868 875 # Case insensitive filesystems cannot rely on lstat() failing to detect
869 876 # a case-only rename. Prune the stat object for any file that does not
870 877 # match the case in the filesystem, if there are multiple files that
871 878 # normalize to the same path.
872 879 if match.isexact() and self._checkcase:
873 880 normed = {}
874 881
875 882 for f, st in pycompat.iteritems(results):
876 883 if st is None:
877 884 continue
878 885
879 886 nc = util.normcase(f)
880 887 paths = normed.get(nc)
881 888
882 889 if paths is None:
883 890 paths = set()
884 891 normed[nc] = paths
885 892
886 893 paths.add(f)
887 894
888 895 for norm, paths in pycompat.iteritems(normed):
889 896 if len(paths) > 1:
890 897 for path in paths:
891 898 folded = self._discoverpath(
892 899 path, norm, True, None, self._map.dirfoldmap
893 900 )
894 901 if path != folded:
895 902 results[path] = None
896 903
897 904 return results, dirsfound, dirsnotfound
898 905
899 906 def walk(self, match, subrepos, unknown, ignored, full=True):
900 907 '''
901 908 Walk recursively through the directory tree, finding all files
902 909 matched by match.
903 910
904 911 If full is False, maybe skip some known-clean files.
905 912
906 913 Return a dict mapping filename to stat-like object (either
907 914 mercurial.osutil.stat instance or return value of os.stat()).
908 915
909 916 '''
910 917 # full is a flag that extensions that hook into walk can use -- this
911 918 # implementation doesn't use it at all. This satisfies the contract
912 919 # because we only guarantee a "maybe".
913 920
914 921 if ignored:
915 922 ignore = util.never
916 923 dirignore = util.never
917 924 elif unknown:
918 925 ignore = self._ignore
919 926 dirignore = self._dirignore
920 927 else:
921 928 # if not unknown and not ignored, drop dir recursion and step 2
922 929 ignore = util.always
923 930 dirignore = util.always
924 931
925 932 matchfn = match.matchfn
926 933 matchalways = match.always()
927 934 matchtdir = match.traversedir
928 935 dmap = self._map
929 936 listdir = util.listdir
930 937 lstat = os.lstat
931 938 dirkind = stat.S_IFDIR
932 939 regkind = stat.S_IFREG
933 940 lnkkind = stat.S_IFLNK
934 941 join = self._join
935 942
936 943 exact = skipstep3 = False
937 944 if match.isexact(): # match.exact
938 945 exact = True
939 946 dirignore = util.always # skip step 2
940 947 elif match.prefix(): # match.match, no patterns
941 948 skipstep3 = True
942 949
943 950 if not exact and self._checkcase:
944 951 normalize = self._normalize
945 952 normalizefile = self._normalizefile
946 953 skipstep3 = False
947 954 else:
948 955 normalize = self._normalize
949 956 normalizefile = None
950 957
951 958 # step 1: find all explicit files
952 959 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
953 960 if matchtdir:
954 961 for d in work:
955 962 matchtdir(d[0])
956 963 for d in dirsnotfound:
957 964 matchtdir(d)
958 965
959 966 skipstep3 = skipstep3 and not (work or dirsnotfound)
960 967 work = [d for d in work if not dirignore(d[0])]
961 968
962 969 # step 2: visit subdirectories
963 970 def traverse(work, alreadynormed):
964 971 wadd = work.append
965 972 while work:
966 973 tracing.counter('dirstate.walk work', len(work))
967 974 nd = work.pop()
968 975 visitentries = match.visitchildrenset(nd)
969 976 if not visitentries:
970 977 continue
971 978 if visitentries == b'this' or visitentries == b'all':
972 979 visitentries = None
973 980 skip = None
974 981 if nd != b'':
975 982 skip = b'.hg'
976 983 try:
977 984 with tracing.log('dirstate.walk.traverse listdir %s', nd):
978 985 entries = listdir(join(nd), stat=True, skip=skip)
979 986 except OSError as inst:
980 987 if inst.errno in (errno.EACCES, errno.ENOENT):
981 988 match.bad(
982 989 self.pathto(nd), encoding.strtolocal(inst.strerror)
983 990 )
984 991 continue
985 992 raise
986 993 for f, kind, st in entries:
987 994 # Some matchers may return files in the visitentries set,
988 995 # instead of 'this', if the matcher explicitly mentions them
989 996 # and is not an exactmatcher. This is acceptable; we do not
990 997 # make any hard assumptions about file-or-directory below
991 998 # based on the presence of `f` in visitentries. If
992 999 # visitchildrenset returned a set, we can always skip the
993 1000 # entries *not* in the set it provided regardless of whether
994 1001 # they're actually a file or a directory.
995 1002 if visitentries and f not in visitentries:
996 1003 continue
997 1004 if normalizefile:
998 1005 # even though f might be a directory, we're only
999 1006 # interested in comparing it to files currently in the
1000 1007 # dmap -- therefore normalizefile is enough
1001 1008 nf = normalizefile(
1002 1009 nd and (nd + b"/" + f) or f, True, True
1003 1010 )
1004 1011 else:
1005 1012 nf = nd and (nd + b"/" + f) or f
1006 1013 if nf not in results:
1007 1014 if kind == dirkind:
1008 1015 if not ignore(nf):
1009 1016 if matchtdir:
1010 1017 matchtdir(nf)
1011 1018 wadd(nf)
1012 1019 if nf in dmap and (matchalways or matchfn(nf)):
1013 1020 results[nf] = None
1014 1021 elif kind == regkind or kind == lnkkind:
1015 1022 if nf in dmap:
1016 1023 if matchalways or matchfn(nf):
1017 1024 results[nf] = st
1018 1025 elif (matchalways or matchfn(nf)) and not ignore(
1019 1026 nf
1020 1027 ):
1021 1028 # unknown file -- normalize if necessary
1022 1029 if not alreadynormed:
1023 1030 nf = normalize(nf, False, True)
1024 1031 results[nf] = st
1025 1032 elif nf in dmap and (matchalways or matchfn(nf)):
1026 1033 results[nf] = None
1027 1034
1028 1035 for nd, d in work:
1029 1036 # alreadynormed means that processwork doesn't have to do any
1030 1037 # expensive directory normalization
1031 1038 alreadynormed = not normalize or nd == d
1032 1039 traverse([d], alreadynormed)
1033 1040
1034 1041 for s in subrepos:
1035 1042 del results[s]
1036 1043 del results[b'.hg']
1037 1044
1038 1045 # step 3: visit remaining files from dmap
1039 1046 if not skipstep3 and not exact:
1040 1047 # If a dmap file is not in results yet, it was either
 1041 1048 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1042 1049 # symlink directory.
1043 1050 if not results and matchalways:
1044 1051 visit = [f for f in dmap]
1045 1052 else:
1046 1053 visit = [f for f in dmap if f not in results and matchfn(f)]
1047 1054 visit.sort()
1048 1055
1049 1056 if unknown:
1050 1057 # unknown == True means we walked all dirs under the roots
 1051 1058 # that weren't ignored, and everything that matched was stat'ed
1052 1059 # and is already in results.
1053 1060 # The rest must thus be ignored or under a symlink.
1054 1061 audit_path = pathutil.pathauditor(self._root, cached=True)
1055 1062
1056 1063 for nf in iter(visit):
1057 1064 # If a stat for the same file was already added with a
1058 1065 # different case, don't add one for this, since that would
1059 1066 # make it appear as if the file exists under both names
1060 1067 # on disk.
1061 1068 if (
1062 1069 normalizefile
1063 1070 and normalizefile(nf, True, True) in results
1064 1071 ):
1065 1072 results[nf] = None
1066 1073 # Report ignored items in the dmap as long as they are not
1067 1074 # under a symlink directory.
1068 1075 elif audit_path.check(nf):
1069 1076 try:
1070 1077 results[nf] = lstat(join(nf))
1071 1078 # file was just ignored, no links, and exists
1072 1079 except OSError:
1073 1080 # file doesn't exist
1074 1081 results[nf] = None
1075 1082 else:
1076 1083 # It's either missing or under a symlink directory
1077 1084 # which we in this case report as missing
1078 1085 results[nf] = None
1079 1086 else:
1080 1087 # We may not have walked the full directory tree above,
1081 1088 # so stat and check everything we missed.
1082 1089 iv = iter(visit)
1083 1090 for st in util.statfiles([join(i) for i in visit]):
1084 1091 results[next(iv)] = st
1085 1092 return results
1086 1093
1087 1094 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1088 1095 # Force Rayon (Rust parallelism library) to respect the number of
1089 1096 # workers. This is a temporary workaround until Rust code knows
1090 1097 # how to read the config file.
1091 1098 numcpus = self._ui.configint(b"worker", b"numcpus")
1092 1099 if numcpus is not None:
1093 1100 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1094 1101
1095 1102 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1096 1103 if not workers_enabled:
1097 1104 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1098 1105
1099 1106 (
1100 1107 lookup,
1101 1108 modified,
1102 1109 added,
1103 1110 removed,
1104 1111 deleted,
1105 1112 clean,
1106 1113 ignored,
1107 1114 unknown,
1108 1115 warnings,
1109 1116 bad,
1110 1117 ) = rustmod.status(
1111 1118 self._map._rustmap,
1112 1119 matcher,
1113 1120 self._rootdir,
1114 1121 self._ignorefiles(),
1115 1122 self._checkexec,
1116 1123 self._lastnormaltime,
1117 1124 bool(list_clean),
1118 1125 bool(list_ignored),
1119 1126 bool(list_unknown),
1120 1127 )
1121 1128 if self._ui.warn:
1122 1129 for item in warnings:
1123 1130 if isinstance(item, tuple):
1124 1131 file_path, syntax = item
1125 1132 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1126 1133 file_path,
1127 1134 syntax,
1128 1135 )
1129 1136 self._ui.warn(msg)
1130 1137 else:
1131 1138 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1132 1139 self._ui.warn(
1133 1140 msg
1134 1141 % (
1135 1142 pathutil.canonpath(
1136 1143 self._rootdir, self._rootdir, item
1137 1144 ),
1138 1145 b"No such file or directory",
1139 1146 )
1140 1147 )
1141 1148
1142 1149 for (fn, message) in bad:
1143 1150 matcher.bad(fn, encoding.strtolocal(message))
1144 1151
1145 1152 status = scmutil.status(
1146 1153 modified=modified,
1147 1154 added=added,
1148 1155 removed=removed,
1149 1156 deleted=deleted,
1150 1157 unknown=unknown,
1151 1158 ignored=ignored,
1152 1159 clean=clean,
1153 1160 )
1154 1161 return (lookup, status)
1155 1162
1156 1163 def status(self, match, subrepos, ignored, clean, unknown):
1157 1164 '''Determine the status of the working copy relative to the
1158 1165 dirstate and return a pair of (unsure, status), where status is of type
1159 1166 scmutil.status and:
1160 1167
1161 1168 unsure:
1162 1169 files that might have been modified since the dirstate was
1163 1170 written, but need to be read to be sure (size is the same
1164 1171 but mtime differs)
1165 1172 status.modified:
1166 1173 files that have definitely been modified since the dirstate
1167 1174 was written (different size or mode)
1168 1175 status.clean:
1169 1176 files that have definitely not been modified since the
1170 1177 dirstate was written
1171 1178 '''
1172 1179 listignored, listclean, listunknown = ignored, clean, unknown
1173 1180 lookup, modified, added, unknown, ignored = [], [], [], [], []
1174 1181 removed, deleted, clean = [], [], []
1175 1182
1176 1183 dmap = self._map
1177 1184 dmap.preload()
1178 1185
1179 1186 use_rust = True
1180 1187
1181 1188 allowed_matchers = (
1182 1189 matchmod.alwaysmatcher,
1183 1190 matchmod.exactmatcher,
1184 1191 matchmod.includematcher,
1185 1192 )
1186 1193
1187 1194 if rustmod is None:
1188 1195 use_rust = False
1189 1196 elif self._checkcase:
1190 1197 # Case-insensitive filesystems are not handled yet
1191 1198 use_rust = False
1192 1199 elif subrepos:
1193 1200 use_rust = False
1194 1201 elif sparse.enabled:
1195 1202 use_rust = False
1196 1203 elif match.traversedir is not None:
1197 1204 use_rust = False
1198 1205 elif not isinstance(match, allowed_matchers):
1199 1206 # Matchers have yet to be implemented
1200 1207 use_rust = False
1201 1208
1202 1209 if use_rust:
1203 1210 try:
1204 1211 return self._rust_status(
1205 1212 match, listclean, listignored, listunknown
1206 1213 )
1207 1214 except rustmod.FallbackError:
1208 1215 pass
1209 1216
1210 1217 def noop(f):
1211 1218 pass
1212 1219
1213 1220 dcontains = dmap.__contains__
1214 1221 dget = dmap.__getitem__
1215 1222 ladd = lookup.append # aka "unsure"
1216 1223 madd = modified.append
1217 1224 aadd = added.append
1218 1225 uadd = unknown.append if listunknown else noop
1219 1226 iadd = ignored.append if listignored else noop
1220 1227 radd = removed.append
1221 1228 dadd = deleted.append
1222 1229 cadd = clean.append if listclean else noop
1223 1230 mexact = match.exact
1224 1231 dirignore = self._dirignore
1225 1232 checkexec = self._checkexec
1226 1233 copymap = self._map.copymap
1227 1234 lastnormaltime = self._lastnormaltime
1228 1235
1229 1236 # We need to do full walks when either
1230 1237 # - we're listing all clean files, or
1231 1238 # - match.traversedir does something, because match.traversedir should
1232 1239 # be called for every dir in the working dir
1233 1240 full = listclean or match.traversedir is not None
1234 1241 for fn, st in pycompat.iteritems(
1235 1242 self.walk(match, subrepos, listunknown, listignored, full=full)
1236 1243 ):
1237 1244 if not dcontains(fn):
1238 1245 if (listignored or mexact(fn)) and dirignore(fn):
1239 1246 if listignored:
1240 1247 iadd(fn)
1241 1248 else:
1242 1249 uadd(fn)
1243 1250 continue
1244 1251
1245 1252 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1246 1253 # written like that for performance reasons. dmap[fn] is not a
1247 1254 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1248 1255 # opcode has fast paths when the value to be unpacked is a tuple or
1249 1256 # a list, but falls back to creating a full-fledged iterator in
1250 1257 # general. That is much slower than simply accessing and storing the
1251 1258 # tuple members one by one.
1252 1259 t = dget(fn)
1253 1260 state = t[0]
1254 1261 mode = t[1]
1255 1262 size = t[2]
1256 1263 time = t[3]
1257 1264
1258 1265 if not st and state in b"nma":
1259 1266 dadd(fn)
1260 1267 elif state == b'n':
1261 1268 if (
1262 1269 size >= 0
1263 1270 and (
1264 1271 (size != st.st_size and size != st.st_size & _rangemask)
1265 1272 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1266 1273 )
1267 1274 or size == -2 # other parent
1268 1275 or fn in copymap
1269 1276 ):
1270 1277 madd(fn)
1271 1278 elif (
1272 1279 time != st[stat.ST_MTIME]
1273 1280 and time != st[stat.ST_MTIME] & _rangemask
1274 1281 ):
1275 1282 ladd(fn)
1276 1283 elif st[stat.ST_MTIME] == lastnormaltime:
1277 1284 # fn may have just been marked as normal and it may have
1278 1285 # changed in the same second without changing its size.
1279 1286 # This can happen if we quickly do multiple commits.
1280 1287 # Force lookup, so we don't miss such a racy file change.
1281 1288 ladd(fn)
1282 1289 elif listclean:
1283 1290 cadd(fn)
1284 1291 elif state == b'm':
1285 1292 madd(fn)
1286 1293 elif state == b'a':
1287 1294 aadd(fn)
1288 1295 elif state == b'r':
1289 1296 radd(fn)
1290 1297 status = scmutil.status(
1291 1298 modified, added, removed, deleted, unknown, ignored, clean
1292 1299 )
1293 1300 return (lookup, status)
1294 1301
1295 1302 def matches(self, match):
1296 1303 '''
1297 1304 return files in the dirstate (in whatever state) filtered by match
1298 1305 '''
1299 1306 dmap = self._map
1300 1307 if rustmod is not None:
1301 1308 dmap = self._map._rustmap
1302 1309
1303 1310 if match.always():
1304 1311 return dmap.keys()
1305 1312 files = match.files()
1306 1313 if match.isexact():
1307 1314 # fast path -- filter the other way around, since typically files is
1308 1315 # much smaller than dmap
1309 1316 return [f for f in files if f in dmap]
1310 1317 if match.prefix() and all(fn in dmap for fn in files):
1311 1318 # fast path -- all the values are known to be files, so just return
1312 1319 # that
1313 1320 return list(files)
1314 1321 return [f for f in dmap if match(f)]
1315 1322
1316 1323 def _actualfilename(self, tr):
1317 1324 if tr:
1318 1325 return self._pendingfilename
1319 1326 else:
1320 1327 return self._filename
1321 1328
1322 1329 def savebackup(self, tr, backupname):
1323 1330 '''Save current dirstate into backup file'''
1324 1331 filename = self._actualfilename(tr)
1325 1332 assert backupname != filename
1326 1333
1327 1334 # use '_writedirstate' instead of 'write' to write changes certainly,
1328 1335 # because the latter omits writing out if transaction is running.
1329 1336 # output file will be used to create backup of dirstate at this point.
1330 1337 if self._dirty or not self._opener.exists(filename):
1331 1338 self._writedirstate(
1332 1339 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1333 1340 )
1334 1341
1335 1342 if tr:
1336 1343 # ensure that subsequent tr.writepending returns True for
1337 1344 # changes written out above, even if dirstate is never
1338 1345 # changed after this
1339 1346 tr.addfilegenerator(
1340 1347 b'dirstate',
1341 1348 (self._filename,),
1342 1349 self._writedirstate,
1343 1350 location=b'plain',
1344 1351 )
1345 1352
1346 1353 # ensure that pending file written above is unlinked at
1347 1354 # failure, even if tr.writepending isn't invoked until the
1348 1355 # end of this transaction
1349 1356 tr.registertmp(filename, location=b'plain')
1350 1357
1351 1358 self._opener.tryunlink(backupname)
1352 1359 # hardlink backup is okay because _writedirstate is always called
1353 1360 # with an "atomictemp=True" file.
1354 1361 util.copyfile(
1355 1362 self._opener.join(filename),
1356 1363 self._opener.join(backupname),
1357 1364 hardlink=True,
1358 1365 )
1359 1366
1360 1367 def restorebackup(self, tr, backupname):
1361 1368 '''Restore dirstate by backup file'''
1362 1369 # this "invalidate()" prevents "wlock.release()" from writing
1363 1370 # changes of dirstate out after restoring from backup file
1364 1371 self.invalidate()
1365 1372 filename = self._actualfilename(tr)
1366 1373 o = self._opener
1367 1374 if util.samefile(o.join(backupname), o.join(filename)):
1368 1375 o.unlink(backupname)
1369 1376 else:
1370 1377 o.rename(backupname, filename, checkambig=True)
1371 1378
1372 1379 def clearbackup(self, tr, backupname):
1373 1380 '''Clear backup file'''
1374 1381 self._opener.unlink(backupname)
1375 1382
1376 1383
1377 1384 class dirstatemap(object):
1378 1385 """Map encapsulating the dirstate's contents.
1379 1386
1380 1387 The dirstate contains the following state:
1381 1388
1382 1389 - `identity` is the identity of the dirstate file, which can be used to
1383 1390 detect when changes have occurred to the dirstate file.
1384 1391
1385 1392 - `parents` is a pair containing the parents of the working copy. The
1386 1393 parents are updated by calling `setparents`.
1387 1394
1388 1395 - the state map maps filenames to tuples of (state, mode, size, mtime),
1389 1396 where state is a single character representing 'normal', 'added',
1390 1397 'removed', or 'merged'. It is read by treating the dirstate as a
1391 1398 dict. File state is updated by calling the `addfile`, `removefile` and
1392 1399 `dropfile` methods.
1393 1400
1394 1401 - `copymap` maps destination filenames to their source filename.
1395 1402
1396 1403 The dirstate also provides the following views onto the state:
1397 1404
1398 1405 - `nonnormalset` is a set of the filenames that have state other
1399 1406 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1400 1407
1401 1408 - `otherparentset` is a set of the filenames that are marked as coming
1402 1409 from the second parent when the dirstate is currently being merged.
1403 1410
1404 1411 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1405 1412 form that they appear as in the dirstate.
1406 1413
1407 1414 - `dirfoldmap` is a dict mapping normalized directory names to the
1408 1415 denormalized form that they appear as in the dirstate.
1409 1416 """
1410 1417
1411 1418 def __init__(self, ui, opener, root):
1412 1419 self._ui = ui
1413 1420 self._opener = opener
1414 1421 self._root = root
1415 1422 self._filename = b'dirstate'
1416 1423
1417 1424 self._parents = None
1418 1425 self._dirtyparents = False
1419 1426
1420 1427 # for consistent view between _pl() and _read() invocations
1421 1428 self._pendingmode = None
1422 1429
1423 1430 @propertycache
1424 1431 def _map(self):
1425 1432 self._map = {}
1426 1433 self.read()
1427 1434 return self._map
1428 1435
1429 1436 @propertycache
1430 1437 def copymap(self):
1431 1438 self.copymap = {}
1432 1439 self._map
1433 1440 return self.copymap
1434 1441
1435 1442 def clear(self):
1436 1443 self._map.clear()
1437 1444 self.copymap.clear()
1438 1445 self.setparents(nullid, nullid)
1439 1446 util.clearcachedproperty(self, b"_dirs")
1440 1447 util.clearcachedproperty(self, b"_alldirs")
1441 1448 util.clearcachedproperty(self, b"filefoldmap")
1442 1449 util.clearcachedproperty(self, b"dirfoldmap")
1443 1450 util.clearcachedproperty(self, b"nonnormalset")
1444 1451 util.clearcachedproperty(self, b"otherparentset")
1445 1452
1446 1453 def items(self):
1447 1454 return pycompat.iteritems(self._map)
1448 1455
1449 1456 # forward for python2,3 compat
1450 1457 iteritems = items
1451 1458
1452 1459 def __len__(self):
1453 1460 return len(self._map)
1454 1461
1455 1462 def __iter__(self):
1456 1463 return iter(self._map)
1457 1464
1458 1465 def get(self, key, default=None):
1459 1466 return self._map.get(key, default)
1460 1467
1461 1468 def __contains__(self, key):
1462 1469 return key in self._map
1463 1470
1464 1471 def __getitem__(self, key):
1465 1472 return self._map[key]
1466 1473
1467 1474 def keys(self):
1468 1475 return self._map.keys()
1469 1476
1470 1477 def preload(self):
1471 1478 """Loads the underlying data, if it's not already loaded"""
1472 1479 self._map
1473 1480
1474 1481 def addfile(self, f, oldstate, state, mode, size, mtime):
1475 1482 """Add a tracked file to the dirstate."""
1476 1483 if oldstate in b"?r" and "_dirs" in self.__dict__:
1477 1484 self._dirs.addpath(f)
1478 1485 if oldstate == b"?" and "_alldirs" in self.__dict__:
1479 1486 self._alldirs.addpath(f)
1480 1487 self._map[f] = dirstatetuple(state, mode, size, mtime)
1481 1488 if state != b'n' or mtime == -1:
1482 1489 self.nonnormalset.add(f)
1483 1490 if size == -2:
1484 1491 self.otherparentset.add(f)
1485 1492
1486 1493 def removefile(self, f, oldstate, size):
1487 1494 """
1488 1495 Mark a file as removed in the dirstate.
1489 1496
1490 1497 The `size` parameter is used to store sentinel values that indicate
1491 1498 the file's previous state. In the future, we should refactor this
1492 1499 to be more explicit about what that state is.
1493 1500 """
1494 1501 if oldstate not in b"?r" and "_dirs" in self.__dict__:
1495 1502 self._dirs.delpath(f)
1496 1503 if oldstate == b"?" and "_alldirs" in self.__dict__:
1497 1504 self._alldirs.addpath(f)
1498 1505 if "filefoldmap" in self.__dict__:
1499 1506 normed = util.normcase(f)
1500 1507 self.filefoldmap.pop(normed, None)
1501 1508 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1502 1509 self.nonnormalset.add(f)
1503 1510
1504 1511 def dropfile(self, f, oldstate):
1505 1512 """
1506 1513 Remove a file from the dirstate. Returns True if the file was
1507 1514 previously recorded.
1508 1515 """
1509 1516 exists = self._map.pop(f, None) is not None
1510 1517 if exists:
1511 1518 if oldstate != b"r" and "_dirs" in self.__dict__:
1512 1519 self._dirs.delpath(f)
1513 1520 if "_alldirs" in self.__dict__:
1514 1521 self._alldirs.delpath(f)
1515 1522 if "filefoldmap" in self.__dict__:
1516 1523 normed = util.normcase(f)
1517 1524 self.filefoldmap.pop(normed, None)
1518 1525 self.nonnormalset.discard(f)
1519 1526 return exists
1520 1527
1521 1528 def clearambiguoustimes(self, files, now):
1522 1529 for f in files:
1523 1530 e = self.get(f)
1524 1531 if e is not None and e[0] == b'n' and e[3] == now:
1525 1532 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1526 1533 self.nonnormalset.add(f)
1527 1534
1528 1535 def nonnormalentries(self):
1529 1536 '''Compute the nonnormal dirstate entries from the dmap'''
1530 1537 try:
1531 1538 return parsers.nonnormalotherparententries(self._map)
1532 1539 except AttributeError:
1533 1540 nonnorm = set()
1534 1541 otherparent = set()
1535 1542 for fname, e in pycompat.iteritems(self._map):
1536 1543 if e[0] != b'n' or e[3] == -1:
1537 1544 nonnorm.add(fname)
1538 1545 if e[0] == b'n' and e[2] == -2:
1539 1546 otherparent.add(fname)
1540 1547 return nonnorm, otherparent
1541 1548
1542 1549 @propertycache
1543 1550 def filefoldmap(self):
1544 1551 """Returns a dictionary mapping normalized case paths to their
1545 1552 non-normalized versions.
1546 1553 """
1547 1554 try:
1548 1555 makefilefoldmap = parsers.make_file_foldmap
1549 1556 except AttributeError:
1550 1557 pass
1551 1558 else:
1552 1559 return makefilefoldmap(
1553 1560 self._map, util.normcasespec, util.normcasefallback
1554 1561 )
1555 1562
1556 1563 f = {}
1557 1564 normcase = util.normcase
1558 1565 for name, s in pycompat.iteritems(self._map):
1559 1566 if s[0] != b'r':
1560 1567 f[normcase(name)] = name
1561 1568 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1562 1569 return f
1563 1570
1564 1571 def hastrackeddir(self, d):
1565 1572 """
1566 1573 Returns True if the dirstate contains a tracked (not removed) file
1567 1574 in this directory.
1568 1575 """
1569 1576 return d in self._dirs
1570 1577
1571 1578 def hasdir(self, d):
1572 1579 """
1573 1580 Returns True if the dirstate contains a file (tracked or removed)
1574 1581 in this directory.
1575 1582 """
1576 1583 return d in self._alldirs
1577 1584
1578 1585 @propertycache
1579 1586 def _dirs(self):
1580 1587 return pathutil.dirs(self._map, b'r')
1581 1588
1582 1589 @propertycache
1583 1590 def _alldirs(self):
1584 1591 return pathutil.dirs(self._map)
1585 1592
1586 1593 def _opendirstatefile(self):
1587 1594 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1588 1595 if self._pendingmode is not None and self._pendingmode != mode:
1589 1596 fp.close()
1590 1597 raise error.Abort(
1591 1598 _(b'working directory state may be changed parallelly')
1592 1599 )
1593 1600 self._pendingmode = mode
1594 1601 return fp
1595 1602
1596 1603 def parents(self):
1597 1604 if not self._parents:
1598 1605 try:
1599 1606 fp = self._opendirstatefile()
1600 1607 st = fp.read(40)
1601 1608 fp.close()
1602 1609 except IOError as err:
1603 1610 if err.errno != errno.ENOENT:
1604 1611 raise
1605 1612 # File doesn't exist, so the current state is empty
1606 1613 st = b''
1607 1614
1608 1615 l = len(st)
1609 1616 if l == 40:
1610 1617 self._parents = (st[:20], st[20:40])
1611 1618 elif l == 0:
1612 1619 self._parents = (nullid, nullid)
1613 1620 else:
1614 1621 raise error.Abort(
1615 1622 _(b'working directory state appears damaged!')
1616 1623 )
1617 1624
1618 1625 return self._parents
1619 1626
1620 1627 def setparents(self, p1, p2):
1621 1628 self._parents = (p1, p2)
1622 1629 self._dirtyparents = True
1623 1630
1624 1631 def read(self):
1625 1632 # ignore HG_PENDING because identity is used only for writing
1626 1633 self.identity = util.filestat.frompath(
1627 1634 self._opener.join(self._filename)
1628 1635 )
1629 1636
1630 1637 try:
1631 1638 fp = self._opendirstatefile()
1632 1639 try:
1633 1640 st = fp.read()
1634 1641 finally:
1635 1642 fp.close()
1636 1643 except IOError as err:
1637 1644 if err.errno != errno.ENOENT:
1638 1645 raise
1639 1646 return
1640 1647 if not st:
1641 1648 return
1642 1649
1643 1650 if util.safehasattr(parsers, b'dict_new_presized'):
1644 1651 # Make an estimate of the number of files in the dirstate based on
1645 1652 # its size. From a linear regression on a set of real-world repos,
1646 1653 # all over 10,000 files, the size of a dirstate entry is 85
1647 1654 # bytes. The cost of resizing is significantly higher than the cost
1648 1655 # of filling in a larger presized dict, so subtract 20% from the
1649 1656 # size.
1650 1657 #
1651 1658 # This heuristic is imperfect in many ways, so in a future dirstate
1652 1659 # format update it makes sense to just record the number of entries
1653 1660 # on write.
1654 1661 self._map = parsers.dict_new_presized(len(st) // 71)
1655 1662
1656 1663 # Python's garbage collector triggers a GC each time a certain number
1657 1664 # of container objects (the number being defined by
1658 1665 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1659 1666 # for each file in the dirstate. The C version then immediately marks
1660 1667 # them as not to be tracked by the collector. However, this has no
1661 1668 # effect on when GCs are triggered, only on what objects the GC looks
1662 1669 # into. This means that O(number of files) GCs are unavoidable.
1663 1670 # Depending on when in the process's lifetime the dirstate is parsed,
1664 1671 # this can get very expensive. As a workaround, disable GC while
1665 1672 # parsing the dirstate.
1666 1673 #
1667 1674 # (we cannot decorate the function directly since it is in a C module)
1668 1675 parse_dirstate = util.nogc(parsers.parse_dirstate)
1669 1676 p = parse_dirstate(self._map, self.copymap, st)
1670 1677 if not self._dirtyparents:
1671 1678 self.setparents(*p)
1672 1679
1673 1680 # Avoid excess attribute lookups by fast pathing certain checks
1674 1681 self.__contains__ = self._map.__contains__
1675 1682 self.__getitem__ = self._map.__getitem__
1676 1683 self.get = self._map.get
1677 1684
1678 1685 def write(self, st, now):
1679 1686 st.write(
1680 1687 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1681 1688 )
1682 1689 st.close()
1683 1690 self._dirtyparents = False
1684 1691 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1685 1692
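    # nonnormalset and otherparentset are both derived from
    # nonnormalentries(); whichever property is accessed first caches the
    # other one as a side effect.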
1686 1693 @propertycache
1687 1694 def nonnormalset(self):
1688 1695 nonnorm, otherparents = self.nonnormalentries()
1689 1696 self.otherparentset = otherparents
1690 1697 return nonnorm
1691 1698
1692 1699 @propertycache
1693 1700 def otherparentset(self):
1694 1701 nonnorm, otherparents = self.nonnormalentries()
1695 1702 self.nonnormalset = nonnorm
1696 1703 return otherparents
1697 1704
1698 1705 @propertycache
1699 1706 def identity(self):
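        # Accessing self._map triggers read(), which assigns self.identity
        # directly on the instance as a side effect; we then return that
        # freshly computed value.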
1700 1707 self._map
1701 1708 return self.identity
1702 1709
1703 1710 @propertycache
1704 1711 def dirfoldmap(self):
1705 1712 f = {}
1706 1713 normcase = util.normcase
1707 1714 for name in self._dirs:
1708 1715 f[normcase(name)] = name
1709 1716 return f
1710 1717
1711 1718
1712 1719 if rustmod is not None:
1713 1720
1714 1721 class dirstatemap(object):
1715 1722 def __init__(self, ui, opener, root):
1716 1723 self._ui = ui
1717 1724 self._opener = opener
1718 1725 self._root = root
1719 1726 self._filename = b'dirstate'
1720 1727 self._parents = None
1721 1728 self._dirtyparents = False
1722 1729
1723 1730 # for consistent view between _pl() and _read() invocations
1724 1731 self._pendingmode = None
1725 1732
1726 1733 def addfile(self, *args, **kwargs):
1727 1734 return self._rustmap.addfile(*args, **kwargs)
1728 1735
1729 1736 def removefile(self, *args, **kwargs):
1730 1737 return self._rustmap.removefile(*args, **kwargs)
1731 1738
1732 1739 def dropfile(self, *args, **kwargs):
1733 1740 return self._rustmap.dropfile(*args, **kwargs)
1734 1741
1735 1742 def clearambiguoustimes(self, *args, **kwargs):
1736 1743 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1737 1744
1738 1745 def nonnormalentries(self):
1739 1746 return self._rustmap.nonnormalentries()
1740 1747
1741 1748 def get(self, *args, **kwargs):
1742 1749 return self._rustmap.get(*args, **kwargs)
1743 1750
1744 1751 @propertycache
1745 1752 def _rustmap(self):
1746 1753 """
1747 1754 Fills the Dirstatemap when called.
1748 1755 Use `self._inner_rustmap` if reading the dirstate is not necessary.
1749 1756 """
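            # Assigning the (still empty) inner map to self._rustmap replaces
            # this propertycache entry, so the read() call below can fill it
            # without re-entering this property.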
1750 1757 self._rustmap = self._inner_rustmap
1751 1758 self.read()
1752 1759 return self._rustmap
1753 1760
1754 1761 @propertycache
1755 1762 def _inner_rustmap(self):
1756 1763 """
1757 1764 Does not fill the Dirstatemap when called. This allows for
1758 1765 optimizations where only setting/getting the parents is needed.
1759 1766 """
1760 1767 self._inner_rustmap = rustmod.DirstateMap(self._root)
1761 1768 return self._inner_rustmap
1762 1769
1763 1770 @property
1764 1771 def copymap(self):
1765 1772 return self._rustmap.copymap()
1766 1773
1767 1774 def preload(self):
1768 1775 self._rustmap
1769 1776
1770 1777 def clear(self):
1771 1778 self._rustmap.clear()
1772 1779 self._inner_rustmap.clear()
1773 1780 self.setparents(nullid, nullid)
1774 1781 util.clearcachedproperty(self, b"_dirs")
1775 1782 util.clearcachedproperty(self, b"_alldirs")
1776 1783 util.clearcachedproperty(self, b"dirfoldmap")
1777 1784
1778 1785 def items(self):
1779 1786 return self._rustmap.items()
1780 1787
1781 1788 def keys(self):
1782 1789 return iter(self._rustmap)
1783 1790
1784 1791 def __contains__(self, key):
1785 1792 return key in self._rustmap
1786 1793
1787 1794 def __getitem__(self, item):
1788 1795 return self._rustmap[item]
1789 1796
1790 1797 def __len__(self):
1791 1798 return len(self._rustmap)
1792 1799
1793 1800 def __iter__(self):
1794 1801 return iter(self._rustmap)
1795 1802
1796 1803 # forward for python2,3 compat
1797 1804 iteritems = items
1798 1805
1799 1806 def _opendirstatefile(self):
1800 1807 fp, mode = txnutil.trypending(
1801 1808 self._root, self._opener, self._filename
1802 1809 )
1803 1810 if self._pendingmode is not None and self._pendingmode != mode:
1804 1811 fp.close()
1805 1812 raise error.Abort(
1806 1813 _(b'working directory state may be changed parallelly')
1807 1814 )
1808 1815 self._pendingmode = mode
1809 1816 return fp
1810 1817
1811 1818 def setparents(self, p1, p2):
1812 1819 self._rustmap.setparents(p1, p2)
1813 1820 self._parents = (p1, p2)
1814 1821 self._dirtyparents = True
1815 1822
1816 1823 def parents(self):
1817 1824 if not self._parents:
1818 1825 try:
1819 1826 fp = self._opendirstatefile()
1820 1827 st = fp.read(40)
1821 1828 fp.close()
1822 1829 except IOError as err:
1823 1830 if err.errno != errno.ENOENT:
1824 1831 raise
1825 1832 # File doesn't exist, so the current state is empty
1826 1833 st = b''
1827 1834
1828 1835 try:
1829 1836 self._parents = self._inner_rustmap.parents(st)
1830 1837 except ValueError:
1831 1838 raise error.Abort(
1832 1839 _(b'working directory state appears damaged!')
1833 1840 )
1834 1841
1835 1842 return self._parents
1836 1843
1837 1844 def read(self):
1838 1845 # ignore HG_PENDING because identity is used only for writing
1839 1846 self.identity = util.filestat.frompath(
1840 1847 self._opener.join(self._filename)
1841 1848 )
1842 1849
1843 1850 try:
1844 1851 fp = self._opendirstatefile()
1845 1852 try:
1846 1853 st = fp.read()
1847 1854 finally:
1848 1855 fp.close()
1849 1856 except IOError as err:
1850 1857 if err.errno != errno.ENOENT:
1851 1858 raise
1852 1859 return
1853 1860 if not st:
1854 1861 return
1855 1862
1856 1863 parse_dirstate = util.nogc(self._rustmap.read)
1857 1864 parents = parse_dirstate(st)
1858 1865 if parents and not self._dirtyparents:
1859 1866 self.setparents(*parents)
1860 1867
1861 1868 self.__contains__ = self._rustmap.__contains__
1862 1869 self.__getitem__ = self._rustmap.__getitem__
1863 1870 self.get = self._rustmap.get
1864 1871
1865 1872 def write(self, st, now):
1866 1873 parents = self.parents()
1867 1874 st.write(self._rustmap.write(parents[0], parents[1], now))
1868 1875 st.close()
1869 1876 self._dirtyparents = False
1870 1877
1871 1878 @propertycache
1872 1879 def filefoldmap(self):
1873 1880 """Returns a dictionary mapping normalized case paths to their
1874 1881 non-normalized versions.
1875 1882 """
1876 1883 return self._rustmap.filefoldmapasdict()
1877 1884
1878 1885 def hastrackeddir(self, d):
1879 1886 self._dirs # Trigger Python's propertycache
1880 1887 return self._rustmap.hastrackeddir(d)
1881 1888
1882 1889 def hasdir(self, d):
1883 1890 self._dirs # Trigger Python's propertycache
1884 1891 return self._rustmap.hasdir(d)
1885 1892
1886 1893 @propertycache
1887 1894 def _dirs(self):
1888 1895 return self._rustmap.getdirs()
1889 1896
1890 1897 @propertycache
1891 1898 def _alldirs(self):
1892 1899 return self._rustmap.getalldirs()
1893 1900
1894 1901 @propertycache
1895 1902 def identity(self):
1896 1903 self._rustmap
1897 1904 return self.identity
1898 1905
1899 1906 @property
1900 1907 def nonnormalset(self):
1901 1908 nonnorm = self._rustmap.non_normal_entries()
1902 1909 return nonnorm
1903 1910
1904 1911 @propertycache
1905 1912 def otherparentset(self):
1906 1913 otherparents = self._rustmap.other_parent_entries()
1907 1914 return otherparents
1908 1915
1909 1916 @propertycache
1910 1917 def dirfoldmap(self):
1911 1918 f = {}
1912 1919 normcase = util.normcase
1913 1920 for name in self._dirs:
1914 1921 f[normcase(name)] = name
1915 1922 return f
@@ -1,3812 +1,3814 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 rcutil,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 hashutil,
78 78 procutil,
79 79 stringutil,
80 80 )
81 81
82 82 from .revlogutils import constants as revlogconst
83 83
84 84 release = lockmod.release
85 85 urlerr = util.urlerr
86 86 urlreq = util.urlreq
87 87
88 88 # set of (path, vfs-location) tuples. vfs-location is:
 89  89 # - 'plain' for vfs relative paths
90 90 # - '' for svfs relative paths
91 91 _cachedfiles = set()
92 92
93 93
94 94 class _basefilecache(scmutil.filecache):
 95  95 class _basefilecache(scmutil.filecache):
 96  96     """All filecache usage on repo is done for logic that should be unfiltered
 97  97     """
97 97
98 98 def __get__(self, repo, type=None):
99 99 if repo is None:
100 100 return self
101 101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 102 unfi = repo.unfiltered()
103 103 try:
104 104 return unfi.__dict__[self.sname]
105 105 except KeyError:
106 106 pass
107 107 return super(_basefilecache, self).__get__(unfi, type)
108 108
109 109 def set(self, repo, value):
110 110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 111
112 112
113 113 class repofilecache(_basefilecache):
114 114 """filecache for files in .hg but outside of .hg/store"""
115 115
116 116 def __init__(self, *paths):
117 117 super(repofilecache, self).__init__(*paths)
118 118 for path in paths:
119 119 _cachedfiles.add((path, b'plain'))
120 120
121 121 def join(self, obj, fname):
122 122 return obj.vfs.join(fname)
123 123
124 124
125 125 class storecache(_basefilecache):
126 126 """filecache for files in the store"""
127 127
128 128 def __init__(self, *paths):
129 129 super(storecache, self).__init__(*paths)
130 130 for path in paths:
131 131 _cachedfiles.add((path, b''))
132 132
133 133 def join(self, obj, fname):
134 134 return obj.sjoin(fname)
135 135
136 136
137 137 class mixedrepostorecache(_basefilecache):
 138 138     """filecache for a mix of files in .hg/store and outside"""
139 139
140 140 def __init__(self, *pathsandlocations):
141 141 # scmutil.filecache only uses the path for passing back into our
142 142 # join(), so we can safely pass a list of paths and locations
143 143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 144 _cachedfiles.update(pathsandlocations)
145 145
146 146 def join(self, obj, fnameandlocation):
147 147 fname, location = fnameandlocation
148 148 if location == b'plain':
149 149 return obj.vfs.join(fname)
150 150 else:
151 151 if location != b'':
152 152 raise error.ProgrammingError(
153 153 b'unexpected location: %s' % location
154 154 )
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 def isfilecached(repo, name):
 159 159     """check if a repo has already cached the "name" filecache-ed property
 160 160
 161 161     This returns a (cachedobj-or-None, iscached) tuple.
162 162 """
163 163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 164 if not cacheentry:
165 165 return None, False
166 166 return cacheentry.obj, True
167 167
168 168
169 169 class unfilteredpropertycache(util.propertycache):
170 170 """propertycache that apply to unfiltered repo only"""
171 171
172 172 def __get__(self, repo, type=None):
173 173 unfi = repo.unfiltered()
174 174 if unfi is repo:
175 175 return super(unfilteredpropertycache, self).__get__(unfi)
176 176 return getattr(unfi, self.name)
177 177
178 178
179 179 class filteredpropertycache(util.propertycache):
180 180 """propertycache that must take filtering in account"""
181 181
182 182 def cachevalue(self, obj, value):
183 183 object.__setattr__(obj, self.name, value)
184 184
185 185
186 186 def hasunfilteredcache(repo, name):
187 187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 188 return name in vars(repo.unfiltered())
189 189
190 190
191 191 def unfilteredmethod(orig):
192 192 """decorate method that always need to be run on unfiltered version"""
193 193
194 194 def wrapper(repo, *args, **kwargs):
195 195 return orig(repo.unfiltered(), *args, **kwargs)
196 196
197 197 return wrapper
198 198
199 199
200 200 moderncaps = {
201 201 b'lookup',
202 202 b'branchmap',
203 203 b'pushkey',
204 204 b'known',
205 205 b'getbundle',
206 206 b'unbundle',
207 207 }
208 208 legacycaps = moderncaps.union({b'changegroupsubset'})
209 209
210 210
211 211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 212 class localcommandexecutor(object):
213 213 def __init__(self, peer):
214 214 self._peer = peer
215 215 self._sent = False
216 216 self._closed = False
217 217
218 218 def __enter__(self):
219 219 return self
220 220
221 221 def __exit__(self, exctype, excvalue, exctb):
222 222 self.close()
223 223
224 224 def callcommand(self, command, args):
225 225 if self._sent:
226 226 raise error.ProgrammingError(
227 227 b'callcommand() cannot be used after sendcommands()'
228 228 )
229 229
230 230 if self._closed:
231 231 raise error.ProgrammingError(
232 232 b'callcommand() cannot be used after close()'
233 233 )
234 234
235 235 # We don't need to support anything fancy. Just call the named
236 236 # method on the peer and return a resolved future.
237 237 fn = getattr(self._peer, pycompat.sysstr(command))
238 238
239 239 f = pycompat.futures.Future()
240 240
241 241 try:
242 242 result = fn(**pycompat.strkwargs(args))
243 243 except Exception:
244 244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 245 else:
246 246 f.set_result(result)
247 247
248 248 return f
249 249
250 250 def sendcommands(self):
251 251 self._sent = True
252 252
253 253 def close(self):
254 254 self._closed = True
255 255
256 256
257 257 @interfaceutil.implementer(repository.ipeercommands)
258 258 class localpeer(repository.peer):
259 259 '''peer for a local repo; reflects only the most recent API'''
260 260
261 261 def __init__(self, repo, caps=None):
262 262 super(localpeer, self).__init__()
263 263
264 264 if caps is None:
265 265 caps = moderncaps.copy()
266 266 self._repo = repo.filtered(b'served')
267 267 self.ui = repo.ui
268 268 self._caps = repo._restrictcapabilities(caps)
269 269
270 270 # Begin of _basepeer interface.
271 271
272 272 def url(self):
273 273 return self._repo.url()
274 274
275 275 def local(self):
276 276 return self._repo
277 277
278 278 def peer(self):
279 279 return self
280 280
281 281 def canpush(self):
282 282 return True
283 283
284 284 def close(self):
285 285 self._repo.close()
286 286
287 287 # End of _basepeer interface.
288 288
289 289 # Begin of _basewirecommands interface.
290 290
291 291 def branchmap(self):
292 292 return self._repo.branchmap()
293 293
294 294 def capabilities(self):
295 295 return self._caps
296 296
297 297 def clonebundles(self):
298 298 return self._repo.tryread(b'clonebundles.manifest')
299 299
300 300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 301 """Used to test argument passing over the wire"""
302 302 return b"%s %s %s %s %s" % (
303 303 one,
304 304 two,
305 305 pycompat.bytestr(three),
306 306 pycompat.bytestr(four),
307 307 pycompat.bytestr(five),
308 308 )
309 309
310 310 def getbundle(
311 311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 312 ):
313 313 chunks = exchange.getbundlechunks(
314 314 self._repo,
315 315 source,
316 316 heads=heads,
317 317 common=common,
318 318 bundlecaps=bundlecaps,
319 319 **kwargs
320 320 )[1]
321 321 cb = util.chunkbuffer(chunks)
322 322
323 323 if exchange.bundle2requested(bundlecaps):
324 324 # When requesting a bundle2, getbundle returns a stream to make the
325 325 # wire level function happier. We need to build a proper object
326 326 # from it in local peer.
327 327 return bundle2.getunbundler(self.ui, cb)
328 328 else:
329 329 return changegroup.getunbundler(b'01', cb, None)
330 330
331 331 def heads(self):
332 332 return self._repo.heads()
333 333
334 334 def known(self, nodes):
335 335 return self._repo.known(nodes)
336 336
337 337 def listkeys(self, namespace):
338 338 return self._repo.listkeys(namespace)
339 339
340 340 def lookup(self, key):
341 341 return self._repo.lookup(key)
342 342
343 343 def pushkey(self, namespace, key, old, new):
344 344 return self._repo.pushkey(namespace, key, old, new)
345 345
346 346 def stream_out(self):
347 347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 348
349 349 def unbundle(self, bundle, heads, url):
350 350 """apply a bundle on a repo
351 351
352 352 This function handles the repo locking itself."""
353 353 try:
354 354 try:
355 355 bundle = exchange.readbundle(self.ui, bundle, None)
356 356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 357 if util.safehasattr(ret, b'getchunks'):
358 358 # This is a bundle20 object, turn it into an unbundler.
359 359 # This little dance should be dropped eventually when the
360 360 # API is finally improved.
361 361 stream = util.chunkbuffer(ret.getchunks())
362 362 ret = bundle2.getunbundler(self.ui, stream)
363 363 return ret
364 364 except Exception as exc:
365 365 # If the exception contains output salvaged from a bundle2
366 366 # reply, we need to make sure it is printed before continuing
367 367 # to fail. So we build a bundle2 with such output and consume
368 368 # it directly.
369 369 #
370 370 # This is not very elegant but allows a "simple" solution for
371 371 # issue4594
372 372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 373 if output:
374 374 bundler = bundle2.bundle20(self._repo.ui)
375 375 for out in output:
376 376 bundler.addpart(out)
377 377 stream = util.chunkbuffer(bundler.getchunks())
378 378 b = bundle2.getunbundler(self.ui, stream)
379 379 bundle2.processbundle(self._repo, b)
380 380 raise
381 381 except error.PushRaced as exc:
382 382 raise error.ResponseError(
383 383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 384 )
385 385
386 386 # End of _basewirecommands interface.
387 387
388 388 # Begin of peer interface.
389 389
390 390 def commandexecutor(self):
391 391 return localcommandexecutor(self)
392 392
393 393 # End of peer interface.
394 394
395 395
396 396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 397 class locallegacypeer(localpeer):
398 398 '''peer extension which implements legacy methods too; used for tests with
399 399 restricted capabilities'''
400 400
401 401 def __init__(self, repo):
402 402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 403
404 404 # Begin of baselegacywirecommands interface.
405 405
406 406 def between(self, pairs):
407 407 return self._repo.between(pairs)
408 408
409 409 def branches(self, nodes):
410 410 return self._repo.branches(nodes)
411 411
412 412 def changegroup(self, nodes, source):
413 413 outgoing = discovery.outgoing(
414 414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 415 )
416 416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 417
418 418 def changegroupsubset(self, bases, heads, source):
419 419 outgoing = discovery.outgoing(
420 420 self._repo, missingroots=bases, missingheads=heads
421 421 )
422 422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 423
424 424 # End of baselegacywirecommands interface.
425 425
426 426
427 427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 428 # clients.
429 429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 430
431 431 # A repository with the sparserevlog feature will have delta chains that
432 432 # can spread over a larger span. Sparse reading cuts these large spans into
433 433 # pieces, so that each piece isn't too big.
434 434 # Without the sparserevlog capability, reading from the repository could use
435 435 # huge amounts of memory, because the whole span would be read at once,
436 436 # including all the intermediate revisions that aren't pertinent for the chain.
437 437 # This is why once a repository has enabled sparse-read, it becomes required.
438 438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 439
 440 440 # A repository with the sidedataflag requirement will allow storing extra
 441 441 # information for revisions without altering their original hashes.
442 442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 443
 444 444 # A repository with the copies-sidedata-changeset requirement will store
445 445 # copies related information in changeset's sidedata.
446 446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 447
448 448 # Functions receiving (ui, features) that extensions can register to impact
449 449 # the ability to load repositories with custom requirements. Only
450 450 # functions defined in loaded extensions are called.
451 451 #
452 452 # The function receives a set of requirement strings that the repository
453 453 # is capable of opening. Functions will typically add elements to the
 454 454 # set to reflect that the extension knows how to handle those requirements.
455 455 featuresetupfuncs = set()
456 456
457 457
458 458 def makelocalrepository(baseui, path, intents=None):
459 459 """Create a local repository object.
460 460
461 461 Given arguments needed to construct a local repository, this function
462 462 performs various early repository loading functionality (such as
463 463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 464 the repository can be opened, derives a type suitable for representing
465 465 that repository, and returns an instance of it.
466 466
467 467 The returned object conforms to the ``repository.completelocalrepository``
468 468 interface.
469 469
470 470 The repository type is derived by calling a series of factory functions
471 471 for each aspect/interface of the final repository. These are defined by
472 472 ``REPO_INTERFACES``.
473 473
474 474 Each factory function is called to produce a type implementing a specific
475 475 interface. The cumulative list of returned types will be combined into a
476 476 new type and that type will be instantiated to represent the local
477 477 repository.
478 478
479 479 The factory functions each receive various state that may be consulted
480 480 as part of deriving a type.
481 481
482 482 Extensions should wrap these factory functions to customize repository type
483 483 creation. Note that an extension's wrapped function may be called even if
484 484 that extension is not loaded for the repo being constructed. Extensions
485 485 should check if their ``__name__`` appears in the
486 486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 487 not.
488 488 """
489 489 ui = baseui.copy()
490 490 # Prevent copying repo configuration.
491 491 ui.copy = baseui.copy
492 492
493 493 # Working directory VFS rooted at repository root.
494 494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495 495
496 496 # Main VFS for .hg/ directory.
497 497 hgpath = wdirvfs.join(b'.hg')
498 498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499 499
500 500 # The .hg/ path should exist and should be a directory. All other
501 501 # cases are errors.
502 502 if not hgvfs.isdir():
503 503 try:
504 504 hgvfs.stat()
505 505 except OSError as e:
506 506 if e.errno != errno.ENOENT:
507 507 raise
508 508
509 509 raise error.RepoError(_(b'repository %s not found') % path)
510 510
511 511 # .hg/requires file contains a newline-delimited list of
512 512 # features/capabilities the opener (us) must have in order to use
513 513 # the repository. This file was introduced in Mercurial 0.9.2,
514 514 # which means very old repositories may not have one. We assume
515 515 # a missing file translates to no requirements.
516 516 try:
517 517 requirements = set(hgvfs.read(b'requires').splitlines())
518 518 except IOError as e:
519 519 if e.errno != errno.ENOENT:
520 520 raise
521 521 requirements = set()
522 522
523 523 # The .hg/hgrc file may load extensions or contain config options
524 524 # that influence repository construction. Attempt to load it and
525 525 # process any new extensions that it may have pulled in.
526 526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 528 extensions.loadall(ui)
529 529 extensions.populateui(ui)
530 530
531 531 # Set of module names of extensions loaded for this repository.
532 532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533 533
534 534 supportedrequirements = gathersupportedrequirements(ui)
535 535
536 536 # We first validate the requirements are known.
537 537 ensurerequirementsrecognized(requirements, supportedrequirements)
538 538
539 539 # Then we validate that the known set is reasonable to use together.
540 540 ensurerequirementscompatible(ui, requirements)
541 541
542 542 # TODO there are unhandled edge cases related to opening repositories with
543 543 # shared storage. If storage is shared, we should also test for requirements
544 544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 545 # that repo, as that repo may load extensions needed to open it. This is a
546 546 # bit complicated because we don't want the other hgrc to overwrite settings
547 547 # in this hgrc.
548 548 #
549 549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 550 # file when sharing repos. But if a requirement is added after the share is
551 551 # performed, thereby introducing a new requirement for the opener, we may
 552 552 # not see that and could encounter a run-time error interacting with
553 553 # that shared store since it has an unknown-to-us requirement.
554 554
555 555 # At this point, we know we should be capable of opening the repository.
556 556 # Now get on with doing that.
557 557
558 558 features = set()
559 559
560 560 # The "store" part of the repository holds versioned data. How it is
561 561 # accessed is determined by various requirements. The ``shared`` or
562 562 # ``relshared`` requirements indicate the store lives in the path contained
563 563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 565 if b'shared' in requirements or b'relshared' in requirements:
566 566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 567 if b'relshared' in requirements:
568 568 sharedpath = hgvfs.join(sharedpath)
569 569
570 570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571 571
572 572 if not sharedvfs.exists():
573 573 raise error.RepoError(
574 574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 575 % sharedvfs.base
576 576 )
577 577
578 578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579 579
580 580 storebasepath = sharedvfs.base
581 581 cachepath = sharedvfs.join(b'cache')
582 582 else:
583 583 storebasepath = hgvfs.base
584 584 cachepath = hgvfs.join(b'cache')
585 585 wcachepath = hgvfs.join(b'wcache')
586 586
587 587 # The store has changed over time and the exact layout is dictated by
588 588 # requirements. The store interface abstracts differences across all
589 589 # of them.
590 590 store = makestore(
591 591 requirements,
592 592 storebasepath,
593 593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 594 )
595 595 hgvfs.createmode = store.createmode
596 596
597 597 storevfs = store.vfs
598 598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599 599
600 600 # The cache vfs is used to manage cache files.
601 601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 602 cachevfs.createmode = store.createmode
603 603 # The cache vfs is used to manage cache files related to the working copy
604 604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 605 wcachevfs.createmode = store.createmode
606 606
607 607 # Now resolve the type for the repository object. We do this by repeatedly
 608 608 # calling a factory function to produce types for specific aspects of the
609 609 # repo's operation. The aggregate returned types are used as base classes
610 610 # for a dynamically-derived type, which will represent our new repository.
611 611
612 612 bases = []
613 613 extrastate = {}
614 614
615 615 for iface, fn in REPO_INTERFACES:
616 616 # We pass all potentially useful state to give extensions tons of
617 617 # flexibility.
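        # Note: fn is a lambda returning the factory (see REPO_INTERFACES),
        # so fn() fetches the current module-level function before calling it;
        # this indirection is what lets extensions wrap those factories.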
618 618 typ = fn()(
619 619 ui=ui,
620 620 intents=intents,
621 621 requirements=requirements,
622 622 features=features,
623 623 wdirvfs=wdirvfs,
624 624 hgvfs=hgvfs,
625 625 store=store,
626 626 storevfs=storevfs,
627 627 storeoptions=storevfs.options,
628 628 cachevfs=cachevfs,
629 629 wcachevfs=wcachevfs,
630 630 extensionmodulenames=extensionmodulenames,
631 631 extrastate=extrastate,
632 632 baseclasses=bases,
633 633 )
634 634
635 635 if not isinstance(typ, type):
636 636 raise error.ProgrammingError(
637 637 b'unable to construct type for %s' % iface
638 638 )
639 639
640 640 bases.append(typ)
641 641
642 642 # type() allows you to use characters in type names that wouldn't be
643 643 # recognized as Python symbols in source code. We abuse that to add
644 644 # rich information about our constructed repo.
645 645 name = pycompat.sysstr(
646 646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 647 )
648 648
649 649 cls = type(name, tuple(bases), {})
650 650
651 651 return cls(
652 652 baseui=baseui,
653 653 ui=ui,
654 654 origroot=path,
655 655 wdirvfs=wdirvfs,
656 656 hgvfs=hgvfs,
657 657 requirements=requirements,
658 658 supportedrequirements=supportedrequirements,
659 659 sharedpath=storebasepath,
660 660 store=store,
661 661 cachevfs=cachevfs,
662 662 wcachevfs=wcachevfs,
663 663 features=features,
664 664 intents=intents,
665 665 )
666 666
667 667
668 668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 669 """Load hgrc files/content into a ui instance.
670 670
671 671 This is called during repository opening to load any additional
672 672 config files or settings relevant to the current repository.
673 673
674 674 Returns a bool indicating whether any additional configs were loaded.
675 675
676 676 Extensions should monkeypatch this function to modify how per-repo
677 677 configs are loaded. For example, an extension may wish to pull in
678 678 configs from alternate files or sources.
679 679 """
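    # For illustration only: an extension could customize this by wrapping the
    # function, e.g. extensions.wrapfunction(localrepo, 'loadhgrc', wrapper),
    # and reading configuration from additional sources in the wrapper.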
680 680 if not rcutil.use_repo_hgrc():
681 681 return False
682 682 try:
683 683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 684 return True
685 685 except IOError:
686 686 return False
687 687
688 688
689 689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 690 """Perform additional actions after .hg/hgrc is loaded.
691 691
692 692 This function is called during repository loading immediately after
693 693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694 694
695 695 The function can be used to validate configs, automatically add
696 696 options (including extensions) based on requirements, etc.
697 697 """
698 698
699 699 # Map of requirements to list of extensions to load automatically when
700 700 # requirement is present.
701 701 autoextensions = {
702 702 b'git': [b'git'],
703 703 b'largefiles': [b'largefiles'],
704 704 b'lfs': [b'lfs'],
705 705 }
706 706
707 707 for requirement, names in sorted(autoextensions.items()):
708 708 if requirement not in requirements:
709 709 continue
710 710
711 711 for name in names:
712 712 if not ui.hasconfig(b'extensions', name):
713 713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
714 714
715 715
716 716 def gathersupportedrequirements(ui):
717 717 """Determine the complete set of recognized requirements."""
718 718 # Start with all requirements supported by this file.
719 719 supported = set(localrepository._basesupported)
720 720
721 721 # Execute ``featuresetupfuncs`` entries if they belong to an extension
722 722 # relevant to this ui instance.
723 723 modules = {m.__name__ for n, m in extensions.extensions(ui)}
724 724
725 725 for fn in featuresetupfuncs:
726 726 if fn.__module__ in modules:
727 727 fn(ui, supported)
728 728
729 729 # Add derived requirements from registered compression engines.
730 730 for name in util.compengines:
731 731 engine = util.compengines[name]
732 732 if engine.available() and engine.revlogheader():
733 733 supported.add(b'exp-compression-%s' % name)
734 734 if engine.name() == b'zstd':
735 735 supported.add(b'revlog-compression-zstd')
736 736
737 737 return supported
738 738
739 739
740 740 def ensurerequirementsrecognized(requirements, supported):
741 741 """Validate that a set of local requirements is recognized.
742 742
743 743 Receives a set of requirements. Raises an ``error.RepoError`` if there
744 744 exists any requirement in that set that currently loaded code doesn't
745 745 recognize.
746 746
747 747 Returns a set of supported requirements.
748 748 """
749 749 missing = set()
750 750
751 751 for requirement in requirements:
752 752 if requirement in supported:
753 753 continue
754 754
755 755 if not requirement or not requirement[0:1].isalnum():
756 756 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
757 757
758 758 missing.add(requirement)
759 759
760 760 if missing:
761 761 raise error.RequirementError(
762 762 _(b'repository requires features unknown to this Mercurial: %s')
763 763 % b' '.join(sorted(missing)),
764 764 hint=_(
765 765 b'see https://mercurial-scm.org/wiki/MissingRequirement '
766 766 b'for more information'
767 767 ),
768 768 )
769 769
770 770
771 771 def ensurerequirementscompatible(ui, requirements):
772 772 """Validates that a set of recognized requirements is mutually compatible.
773 773
774 774 Some requirements may not be compatible with others or require
775 775 config options that aren't enabled. This function is called during
776 776 repository opening to ensure that the set of requirements needed
777 777 to open a repository is sane and compatible with config options.
778 778
779 779 Extensions can monkeypatch this function to perform additional
780 780 checking.
781 781
782 782 ``error.RepoError`` should be raised on failure.
783 783 """
784 784 if b'exp-sparse' in requirements and not sparse.enabled:
785 785 raise error.RepoError(
786 786 _(
787 787 b'repository is using sparse feature but '
788 788 b'sparse is not enabled; enable the '
789 789 b'"sparse" extensions to access'
790 790 )
791 791 )
792 792
793 793
794 794 def makestore(requirements, path, vfstype):
795 795 """Construct a storage object for a repository."""
796 796 if b'store' in requirements:
797 797 if b'fncache' in requirements:
798 798 return storemod.fncachestore(
799 799 path, vfstype, b'dotencode' in requirements
800 800 )
801 801
802 802 return storemod.encodedstore(path, vfstype)
803 803
804 804 return storemod.basicstore(path, vfstype)
805 805
806 806
807 807 def resolvestorevfsoptions(ui, requirements, features):
808 808 """Resolve the options to pass to the store vfs opener.
809 809
810 810 The returned dict is used to influence behavior of the storage layer.
811 811 """
812 812 options = {}
813 813
814 814 if b'treemanifest' in requirements:
815 815 options[b'treemanifest'] = True
816 816
817 817 # experimental config: format.manifestcachesize
818 818 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
819 819 if manifestcachesize is not None:
820 820 options[b'manifestcachesize'] = manifestcachesize
821 821
822 822 # In the absence of another requirement superseding a revlog-related
823 823 # requirement, we have to assume the repo is using revlog version 0.
824 824 # This revlog format is super old and we don't bother trying to parse
825 825 # opener options for it because those options wouldn't do anything
826 826 # meaningful on such old repos.
827 827 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
828 828 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
829 829 else: # explicitly mark repo as using revlogv0
830 830 options[b'revlogv0'] = True
831 831
832 832 if COPIESSDC_REQUIREMENT in requirements:
833 833 options[b'copies-storage'] = b'changeset-sidedata'
834 834 else:
835 835 writecopiesto = ui.config(b'experimental', b'copies.write-to')
836 836 copiesextramode = (b'changeset-only', b'compatibility')
837 837 if writecopiesto in copiesextramode:
838 838 options[b'copies-storage'] = b'extra'
839 839
840 840 return options
841 841
842 842
843 843 def resolverevlogstorevfsoptions(ui, requirements, features):
844 844 """Resolve opener options specific to revlogs."""
845 845
846 846 options = {}
847 847 options[b'flagprocessors'] = {}
848 848
849 849 if b'revlogv1' in requirements:
850 850 options[b'revlogv1'] = True
851 851 if REVLOGV2_REQUIREMENT in requirements:
852 852 options[b'revlogv2'] = True
853 853
854 854 if b'generaldelta' in requirements:
855 855 options[b'generaldelta'] = True
856 856
857 857 # experimental config: format.chunkcachesize
858 858 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
859 859 if chunkcachesize is not None:
860 860 options[b'chunkcachesize'] = chunkcachesize
861 861
862 862 deltabothparents = ui.configbool(
863 863 b'storage', b'revlog.optimize-delta-parent-choice'
864 864 )
865 865 options[b'deltabothparents'] = deltabothparents
866 866
867 867 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
868 868 lazydeltabase = False
869 869 if lazydelta:
870 870 lazydeltabase = ui.configbool(
871 871 b'storage', b'revlog.reuse-external-delta-parent'
872 872 )
873 873 if lazydeltabase is None:
874 874 lazydeltabase = not scmutil.gddeltaconfig(ui)
875 875 options[b'lazydelta'] = lazydelta
876 876 options[b'lazydeltabase'] = lazydeltabase
877 877
878 878 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
879 879 if 0 <= chainspan:
880 880 options[b'maxdeltachainspan'] = chainspan
881 881
882 882 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
883 883 if mmapindexthreshold is not None:
884 884 options[b'mmapindexthreshold'] = mmapindexthreshold
885 885
886 886 withsparseread = ui.configbool(b'experimental', b'sparse-read')
887 887 srdensitythres = float(
888 888 ui.config(b'experimental', b'sparse-read.density-threshold')
889 889 )
890 890 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
891 891 options[b'with-sparse-read'] = withsparseread
892 892 options[b'sparse-read-density-threshold'] = srdensitythres
893 893 options[b'sparse-read-min-gap-size'] = srmingapsize
894 894
895 895 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
896 896 options[b'sparse-revlog'] = sparserevlog
897 897 if sparserevlog:
898 898 options[b'generaldelta'] = True
899 899
900 900 sidedata = SIDEDATA_REQUIREMENT in requirements
901 901 options[b'side-data'] = sidedata
902 902
903 903 maxchainlen = None
904 904 if sparserevlog:
905 905 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
906 906 # experimental config: format.maxchainlen
907 907 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
908 908 if maxchainlen is not None:
909 909 options[b'maxchainlen'] = maxchainlen
910 910
911 911 for r in requirements:
 912 912         # we allow multiple compression engine requirements to co-exist because,
 913 913         # strictly speaking, revlog seems to support mixed compression styles.
914 914 #
915 915 # The compression used for new entries will be "the last one"
916 916 prefix = r.startswith
917 917 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
918 918 options[b'compengine'] = r.split(b'-', 2)[2]
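            # e.g. b'revlog-compression-zstd' yields compengine b'zstd'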
919 919
920 920 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
921 921 if options[b'zlib.level'] is not None:
922 922 if not (0 <= options[b'zlib.level'] <= 9):
923 923 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
924 924 raise error.Abort(msg % options[b'zlib.level'])
925 925 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
926 926 if options[b'zstd.level'] is not None:
927 927 if not (0 <= options[b'zstd.level'] <= 22):
928 928 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
929 929 raise error.Abort(msg % options[b'zstd.level'])
930 930
931 931 if repository.NARROW_REQUIREMENT in requirements:
932 932 options[b'enableellipsis'] = True
933 933
934 934 if ui.configbool(b'experimental', b'rust.index'):
935 935 options[b'rust.index'] = True
936 936 if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
937 937 options[b'exp-persistent-nodemap'] = True
938 938 if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
939 939 options[b'exp-persistent-nodemap.mmap'] = True
940 940 if ui.configbool(b'devel', b'persistent-nodemap'):
941 941 options[b'devel-force-nodemap'] = True
942 942
943 943 return options
944 944
945 945
946 946 def makemain(**kwargs):
947 947 """Produce a type conforming to ``ilocalrepositorymain``."""
948 948 return localrepository
949 949
950 950
951 951 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
952 952 class revlogfilestorage(object):
953 953 """File storage when using revlogs."""
954 954
955 955 def file(self, path):
956 956 if path[0] == b'/':
957 957 path = path[1:]
958 958
959 959 return filelog.filelog(self.svfs, path)
960 960
961 961
962 962 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
963 963 class revlognarrowfilestorage(object):
964 964 """File storage when using revlogs and narrow files."""
965 965
966 966 def file(self, path):
967 967 if path[0] == b'/':
968 968 path = path[1:]
969 969
970 970 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
971 971
972 972
973 973 def makefilestorage(requirements, features, **kwargs):
974 974 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
975 975 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
976 976 features.add(repository.REPO_FEATURE_STREAM_CLONE)
977 977
978 978 if repository.NARROW_REQUIREMENT in requirements:
979 979 return revlognarrowfilestorage
980 980 else:
981 981 return revlogfilestorage
982 982
983 983
984 984 # List of repository interfaces and factory functions for them. Each
985 985 # will be called in order during ``makelocalrepository()`` to iteratively
986 986 # derive the final type for a local repository instance. We capture the
987 987 # function as a lambda so we don't hold a reference and the module-level
988 988 # functions can be wrapped.
989 989 REPO_INTERFACES = [
990 990 (repository.ilocalrepositorymain, lambda: makemain),
991 991 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
992 992 ]
993 993
994 994
995 995 @interfaceutil.implementer(repository.ilocalrepositorymain)
996 996 class localrepository(object):
997 997 """Main class for representing local repositories.
998 998
999 999 All local repositories are instances of this class.
1000 1000
1001 1001 Constructed on its own, instances of this class are not usable as
1002 1002 repository objects. To obtain a usable repository object, call
1003 1003 ``hg.repository()``, ``localrepo.instance()``, or
1004 1004 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1005 1005 ``instance()`` adds support for creating new repositories.
1006 1006 ``hg.repository()`` adds more extension integration, including calling
1007 1007 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1008 1008 used.
1009 1009 """
1010 1010
1011 1011 # obsolete experimental requirements:
1012 1012 # - manifestv2: An experimental new manifest format that allowed
1013 1013 # for stem compression of long paths. Experiment ended up not
1014 1014 # being successful (repository sizes went up due to worse delta
1015 1015 # chains), and the code was deleted in 4.6.
1016 1016 supportedformats = {
1017 1017 b'revlogv1',
1018 1018 b'generaldelta',
1019 1019 b'treemanifest',
1020 1020 COPIESSDC_REQUIREMENT,
1021 1021 REVLOGV2_REQUIREMENT,
1022 1022 SIDEDATA_REQUIREMENT,
1023 1023 SPARSEREVLOG_REQUIREMENT,
1024 1024 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1025 1025 }
1026 1026 _basesupported = supportedformats | {
1027 1027 b'store',
1028 1028 b'fncache',
1029 1029 b'shared',
1030 1030 b'relshared',
1031 1031 b'dotencode',
1032 1032 b'exp-sparse',
1033 1033 b'internal-phase',
1034 1034 }
1035 1035
 1036 1036     # list of prefixes for files which can be written without 'wlock'
1037 1037 # Extensions should extend this list when needed
1038 1038 _wlockfreeprefix = {
 1039 1039         # We might consider requiring 'wlock' for the next
 1040 1040         # two, but pretty much all the existing code assumes
 1041 1041         # wlock is not needed, so we keep them excluded for
1042 1042 # now.
1043 1043 b'hgrc',
1044 1044 b'requires',
 1045 1045         # XXX cache is a complicated business; someone
1046 1046 # should investigate this in depth at some point
1047 1047 b'cache/',
1048 1048 # XXX shouldn't be dirstate covered by the wlock?
1049 1049 b'dirstate',
1050 1050 # XXX bisect was still a bit too messy at the time
1051 1051 # this changeset was introduced. Someone should fix
 1052 1052         # the remaining bit and drop this line
1053 1053 b'bisect.state',
1054 1054 }
1055 1055
1056 1056 def __init__(
1057 1057 self,
1058 1058 baseui,
1059 1059 ui,
1060 1060 origroot,
1061 1061 wdirvfs,
1062 1062 hgvfs,
1063 1063 requirements,
1064 1064 supportedrequirements,
1065 1065 sharedpath,
1066 1066 store,
1067 1067 cachevfs,
1068 1068 wcachevfs,
1069 1069 features,
1070 1070 intents=None,
1071 1071 ):
1072 1072 """Create a new local repository instance.
1073 1073
1074 1074 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1075 1075 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1076 1076 object.
1077 1077
1078 1078 Arguments:
1079 1079
1080 1080 baseui
1081 1081 ``ui.ui`` instance that ``ui`` argument was based off of.
1082 1082
1083 1083 ui
1084 1084 ``ui.ui`` instance for use by the repository.
1085 1085
1086 1086 origroot
1087 1087 ``bytes`` path to working directory root of this repository.
1088 1088
1089 1089 wdirvfs
1090 1090 ``vfs.vfs`` rooted at the working directory.
1091 1091
1092 1092 hgvfs
1093 1093 ``vfs.vfs`` rooted at .hg/
1094 1094
1095 1095 requirements
1096 1096 ``set`` of bytestrings representing repository opening requirements.
1097 1097
1098 1098 supportedrequirements
1099 1099 ``set`` of bytestrings representing repository requirements that we
 1100 1100            know how to open. May be a superset of ``requirements``.
1101 1101
1102 1102 sharedpath
 1103 1103            ``bytes`` defining the path to the storage base directory. Points to a
1104 1104 ``.hg/`` directory somewhere.
1105 1105
1106 1106 store
1107 1107 ``store.basicstore`` (or derived) instance providing access to
1108 1108 versioned storage.
1109 1109
1110 1110 cachevfs
1111 1111 ``vfs.vfs`` used for cache files.
1112 1112
1113 1113 wcachevfs
1114 1114 ``vfs.vfs`` used for cache files related to the working copy.
1115 1115
1116 1116 features
1117 1117 ``set`` of bytestrings defining features/capabilities of this
1118 1118 instance.
1119 1119
1120 1120 intents
1121 1121 ``set`` of system strings indicating what this repo will be used
1122 1122 for.
1123 1123 """
1124 1124 self.baseui = baseui
1125 1125 self.ui = ui
1126 1126 self.origroot = origroot
1127 1127 # vfs rooted at working directory.
1128 1128 self.wvfs = wdirvfs
1129 1129 self.root = wdirvfs.base
1130 1130 # vfs rooted at .hg/. Used to access most non-store paths.
1131 1131 self.vfs = hgvfs
1132 1132 self.path = hgvfs.base
1133 1133 self.requirements = requirements
1134 1134 self.supported = supportedrequirements
1135 1135 self.sharedpath = sharedpath
1136 1136 self.store = store
1137 1137 self.cachevfs = cachevfs
1138 1138 self.wcachevfs = wcachevfs
1139 1139 self.features = features
1140 1140
1141 1141 self.filtername = None
1142 1142
1143 1143 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1144 1144 b'devel', b'check-locks'
1145 1145 ):
1146 1146 self.vfs.audit = self._getvfsward(self.vfs.audit)
 1147 1147         # A list of callbacks to shape the phase if no data were found.
 1148 1148         # Callbacks are in the form: func(repo, roots) --> processed root.
 1149 1149         # This list is to be filled by extensions during repo setup.
1150 1150 self._phasedefaults = []
1151 1151
1152 1152 color.setup(self.ui)
1153 1153
1154 1154 self.spath = self.store.path
1155 1155 self.svfs = self.store.vfs
1156 1156 self.sjoin = self.store.join
1157 1157 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1158 1158 b'devel', b'check-locks'
1159 1159 ):
1160 1160 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1161 1161 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1162 1162 else: # standard vfs
1163 1163 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1164 1164
1165 1165 self._dirstatevalidatewarned = False
1166 1166
1167 1167 self._branchcaches = branchmap.BranchMapCache()
1168 1168 self._revbranchcache = None
1169 1169 self._filterpats = {}
1170 1170 self._datafilters = {}
1171 1171 self._transref = self._lockref = self._wlockref = None
1172 1172
1173 1173 # A cache for various files under .hg/ that tracks file changes,
1174 1174 # (used by the filecache decorator)
1175 1175 #
1176 1176 # Maps a property name to its util.filecacheentry
1177 1177 self._filecache = {}
1178 1178
1179 1179 # hold sets of revision to be filtered
1180 1180 # should be cleared when something might have changed the filter value:
1181 1181 # - new changesets,
1182 1182 # - phase change,
1183 1183 # - new obsolescence marker,
1184 1184 # - working directory parent change,
1185 1185 # - bookmark changes
1186 1186 self.filteredrevcache = {}
1187 1187
1188 1188 # post-dirstate-status hooks
1189 1189 self._postdsstatus = []
1190 1190
1191 1191 # generic mapping between names and nodes
1192 1192 self.names = namespaces.namespaces()
1193 1193
1194 1194 # Key to signature value.
1195 1195 self._sparsesignaturecache = {}
1196 1196 # Signature to cached matcher instance.
1197 1197 self._sparsematchercache = {}
1198 1198
1199 1199 self._extrafilterid = repoview.extrafilter(ui)
1200 1200
1201 1201 self.filecopiesmode = None
1202 1202 if COPIESSDC_REQUIREMENT in self.requirements:
1203 1203 self.filecopiesmode = b'changeset-sidedata'
1204 1204
1205 1205 def _getvfsward(self, origfunc):
1206 1206 """build a ward for self.vfs"""
1207 1207 rref = weakref.ref(self)
1208 1208
1209 1209 def checkvfs(path, mode=None):
1210 1210 ret = origfunc(path, mode=mode)
1211 1211 repo = rref()
1212 1212 if (
1213 1213 repo is None
1214 1214 or not util.safehasattr(repo, b'_wlockref')
1215 1215 or not util.safehasattr(repo, b'_lockref')
1216 1216 ):
1217 1217 return
1218 1218 if mode in (None, b'r', b'rb'):
1219 1219 return
1220 1220 if path.startswith(repo.path):
1221 1221 # truncate name relative to the repository (.hg)
1222 1222 path = path[len(repo.path) + 1 :]
1223 1223 if path.startswith(b'cache/'):
1224 1224 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1225 1225 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1226 1226 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1227 1227 # journal is covered by 'lock'
1228 1228 if repo._currentlock(repo._lockref) is None:
1229 1229 repo.ui.develwarn(
1230 1230 b'write with no lock: "%s"' % path,
1231 1231 stacklevel=3,
1232 1232 config=b'check-locks',
1233 1233 )
1234 1234 elif repo._currentlock(repo._wlockref) is None:
1235 1235 # rest of vfs files are covered by 'wlock'
1236 1236 #
1237 1237 # exclude special files
1238 1238 for prefix in self._wlockfreeprefix:
1239 1239 if path.startswith(prefix):
1240 1240 return
1241 1241 repo.ui.develwarn(
1242 1242 b'write with no wlock: "%s"' % path,
1243 1243 stacklevel=3,
1244 1244 config=b'check-locks',
1245 1245 )
1246 1246 return ret
1247 1247
1248 1248 return checkvfs
1249 1249
1250 1250 def _getsvfsward(self, origfunc):
1251 1251 """build a ward for self.svfs"""
1252 1252 rref = weakref.ref(self)
1253 1253
1254 1254 def checksvfs(path, mode=None):
1255 1255 ret = origfunc(path, mode=mode)
1256 1256 repo = rref()
1257 1257 if repo is None or not util.safehasattr(repo, b'_lockref'):
1258 1258 return
1259 1259 if mode in (None, b'r', b'rb'):
1260 1260 return
1261 1261 if path.startswith(repo.sharedpath):
1262 1262 # truncate name relative to the repository (.hg)
1263 1263 path = path[len(repo.sharedpath) + 1 :]
1264 1264 if repo._currentlock(repo._lockref) is None:
1265 1265 repo.ui.develwarn(
1266 1266 b'write with no lock: "%s"' % path, stacklevel=4
1267 1267 )
1268 1268 return ret
1269 1269
1270 1270 return checksvfs
1271 1271
1272 1272 def close(self):
1273 1273 self._writecaches()
1274 1274
1275 1275 def _writecaches(self):
1276 1276 if self._revbranchcache:
1277 1277 self._revbranchcache.write()
1278 1278
1279 1279 def _restrictcapabilities(self, caps):
1280 1280 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1281 1281 caps = set(caps)
1282 1282 capsblob = bundle2.encodecaps(
1283 1283 bundle2.getrepocaps(self, role=b'client')
1284 1284 )
1285 1285 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1286 1286 return caps
1287 1287
1288 1288 def _writerequirements(self):
1289 1289 scmutil.writerequires(self.vfs, self.requirements)
1290 1290
1291 1291 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1292 1292 # self -> auditor -> self._checknested -> self
1293 1293
1294 1294 @property
1295 1295 def auditor(self):
1296 1296 # This is only used by context.workingctx.match in order to
1297 1297 # detect files in subrepos.
1298 1298 return pathutil.pathauditor(self.root, callback=self._checknested)
1299 1299
1300 1300 @property
1301 1301 def nofsauditor(self):
1302 1302 # This is only used by context.basectx.match in order to detect
1303 1303 # files in subrepos.
1304 1304 return pathutil.pathauditor(
1305 1305 self.root, callback=self._checknested, realfs=False, cached=True
1306 1306 )
1307 1307
1308 1308 def _checknested(self, path):
1309 1309 """Determine if path is a legal nested repository."""
1310 1310 if not path.startswith(self.root):
1311 1311 return False
1312 1312 subpath = path[len(self.root) + 1 :]
1313 1313 normsubpath = util.pconvert(subpath)
1314 1314
1315 1315 # XXX: Checking against the current working copy is wrong in
1316 1316 # the sense that it can reject things like
1317 1317 #
1318 1318 # $ hg cat -r 10 sub/x.txt
1319 1319 #
1320 1320 # if sub/ is no longer a subrepository in the working copy
1321 1321 # parent revision.
1322 1322 #
1323 1323 # However, it can of course also allow things that would have
1324 1324 # been rejected before, such as the above cat command if sub/
1325 1325 # is a subrepository now, but was a normal directory before.
1326 1326 # The old path auditor would have rejected by mistake since it
1327 1327 # panics when it sees sub/.hg/.
1328 1328 #
1329 1329 # All in all, checking against the working copy seems sensible
1330 1330 # since we want to prevent access to nested repositories on
1331 1331 # the filesystem *now*.
1332 1332 ctx = self[None]
1333 1333 parts = util.splitpath(subpath)
1334 1334 while parts:
1335 1335 prefix = b'/'.join(parts)
1336 1336 if prefix in ctx.substate:
1337 1337 if prefix == normsubpath:
1338 1338 return True
1339 1339 else:
1340 1340 sub = ctx.sub(prefix)
1341 1341 return sub.checknested(subpath[len(prefix) + 1 :])
1342 1342 else:
1343 1343 parts.pop()
1344 1344 return False
1345 1345
1346 1346 def peer(self):
1347 1347 return localpeer(self) # not cached to avoid reference cycle
1348 1348
1349 1349 def unfiltered(self):
1350 1350 """Return unfiltered version of the repository
1351 1351
1352 1352 Intended to be overwritten by filtered repo."""
1353 1353 return self
1354 1354
1355 1355 def filtered(self, name, visibilityexceptions=None):
1356 1356 """Return a filtered version of a repository
1357 1357
1358 1358 The `name` parameter is the identifier of the requested view. This
1359 1359 will return a repoview object set "exactly" to the specified view.
1360 1360
1361 1361 This function does not apply recursive filtering to a repository. For
1362 1362 example calling `repo.filtered("served")` will return a repoview using
1363 1363 the "served" view, regardless of the initial view used by `repo`.
1364 1364
1365 1365 In other words, there is always only one level of `repoview` "filtering".
1366 1366 """
1367 1367 if self._extrafilterid is not None and b'%' not in name:
1368 1368 name = name + b'%' + self._extrafilterid
1369 1369
1370 1370 cls = repoview.newtype(self.unfiltered().__class__)
1371 1371 return cls(self, name, visibilityexceptions)
1372 1372
1373 1373 @mixedrepostorecache(
1374 1374 (b'bookmarks', b'plain'),
1375 1375 (b'bookmarks.current', b'plain'),
1376 1376 (b'bookmarks', b''),
1377 1377 (b'00changelog.i', b''),
1378 1378 )
1379 1379 def _bookmarks(self):
1380 1380 # Since the multiple files involved in the transaction cannot be
1381 1381 # written atomically (with current repository format), there is a race
1382 1382 # condition here.
1383 1383 #
1384 1384 # 1) changelog content A is read
1385 1385 # 2) an outside transaction updates the changelog to content B
1386 1386 # 3) an outside transaction updates the bookmark file to refer to content B
1387 1387 # 4) bookmarks file content is read and filtered against changelog-A
1388 1388 #
1389 1389 # When this happens, bookmarks against nodes missing from A are dropped.
1390 1390 #
1391 1391 # Having this happen during a read is not great, but it becomes worse
1392 1392 # when it happens during a write, because the bookmarks pointing to "unknown"
1393 1393 # nodes will be dropped for good. However, writes happen within locks.
1394 1394 # This locking makes it possible to have a race-free consistent read.
1395 1395 # For this purpose, data read from disk before locking is
1396 1396 # "invalidated" right after the locks are taken. These invalidations are
1397 1397 # "light", the `filecache` mechanism keeps the data in memory and will
1398 1398 # reuse it if the underlying files did not change. Not parsing the
1399 1399 # same data multiple times helps performance.
1400 1400 #
1401 1401 # Unfortunately, in the case described above, the files tracked by the
1402 1402 # bookmarks file cache might not have changed, but the in-memory
1403 1403 # content is still "wrong" because we used an older changelog content
1404 1404 # to process the on-disk data. So after locking, the changelog would be
1405 1405 # refreshed but `_bookmarks` would be preserved.
1406 1406 # Adding `00changelog.i` to the list of tracked files is not
1407 1407 # enough, because at the time we build the content for `_bookmarks` in
1408 1408 # (4), the changelog file has already diverged from the content used
1409 1409 # for loading `changelog` in (1)
1410 1410 #
1411 1411 # To prevent the issue, we force the changelog to be explicitly
1412 1412 # reloaded while computing `_bookmarks`. The data race can still happen
1413 1413 # without the lock (with a narrower window), but it would no longer go
1414 1414 # undetected during the lock time refresh.
1415 1415 #
1416 1416 # The new schedule is as follows:
1417 1417 #
1418 1418 # 1) filecache logic detects that `_bookmarks` needs to be computed
1419 1419 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1420 1420 # 3) We force `changelog` filecache to be tested
1421 1421 # 4) cachestat for `changelog` are captured (for changelog)
1422 1422 # 5) `_bookmarks` is computed and cached
1423 1423 #
1424 1424 # The step in (3) ensures we have a changelog at least as recent as the
1425 1425 # cache stat computed in (1). As a result, at locking time:
1426 1426 # * if the changelog did not change since (1) -> we can reuse the data
1427 1427 # * otherwise -> the bookmarks get refreshed.
1428 1428 self._refreshchangelog()
1429 1429 return bookmarks.bmstore(self)
1430 1430
1431 1431 def _refreshchangelog(self):
1432 1432 """make sure the in memory changelog match the on-disk one"""
1433 1433 if 'changelog' in vars(self) and self.currenttransaction() is None:
1434 1434 del self.changelog
1435 1435
1436 1436 @property
1437 1437 def _activebookmark(self):
1438 1438 return self._bookmarks.active
1439 1439
1440 1440 # _phasesets depend on changelog. what we need is to call
1441 1441 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1442 1442 # can't be easily expressed in filecache mechanism.
1443 1443 @storecache(b'phaseroots', b'00changelog.i')
1444 1444 def _phasecache(self):
1445 1445 return phases.phasecache(self, self._phasedefaults)
1446 1446
1447 1447 @storecache(b'obsstore')
1448 1448 def obsstore(self):
1449 1449 return obsolete.makestore(self.ui, self)
1450 1450
1451 1451 @storecache(b'00changelog.i')
1452 1452 def changelog(self):
1453 # load dirstate before changelog to avoid a race condition (see issue6303)
1454 self.dirstate.prefetch_parents()
1453 1455 return self.store.changelog(txnutil.mayhavepending(self.root))
1454 1456
1455 1457 @storecache(b'00manifest.i')
1456 1458 def manifestlog(self):
1457 1459 return self.store.manifestlog(self, self._storenarrowmatch)
1458 1460
1459 1461 @repofilecache(b'dirstate')
1460 1462 def dirstate(self):
1461 1463 return self._makedirstate()
1462 1464
1463 1465 def _makedirstate(self):
1464 1466 """Extension point for wrapping the dirstate per-repo."""
1465 1467 sparsematchfn = lambda: sparse.matcher(self)
1466 1468
1467 1469 return dirstate.dirstate(
1468 1470 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1469 1471 )
1470 1472
1471 1473 def _dirstatevalidate(self, node):
1472 1474 try:
1473 1475 self.changelog.rev(node)
1474 1476 return node
1475 1477 except error.LookupError:
1476 1478 if not self._dirstatevalidatewarned:
1477 1479 self._dirstatevalidatewarned = True
1478 1480 self.ui.warn(
1479 1481 _(b"warning: ignoring unknown working parent %s!\n")
1480 1482 % short(node)
1481 1483 )
1482 1484 return nullid
1483 1485
1484 1486 @storecache(narrowspec.FILENAME)
1485 1487 def narrowpats(self):
1486 1488 """matcher patterns for this repository's narrowspec
1487 1489
1488 1490 A tuple of (includes, excludes).
1489 1491 """
1490 1492 return narrowspec.load(self)
1491 1493
1492 1494 @storecache(narrowspec.FILENAME)
1493 1495 def _storenarrowmatch(self):
1494 1496 if repository.NARROW_REQUIREMENT not in self.requirements:
1495 1497 return matchmod.always()
1496 1498 include, exclude = self.narrowpats
1497 1499 return narrowspec.match(self.root, include=include, exclude=exclude)
1498 1500
1499 1501 @storecache(narrowspec.FILENAME)
1500 1502 def _narrowmatch(self):
1501 1503 if repository.NARROW_REQUIREMENT not in self.requirements:
1502 1504 return matchmod.always()
1503 1505 narrowspec.checkworkingcopynarrowspec(self)
1504 1506 include, exclude = self.narrowpats
1505 1507 return narrowspec.match(self.root, include=include, exclude=exclude)
1506 1508
1507 1509 def narrowmatch(self, match=None, includeexact=False):
1508 1510 """matcher corresponding the the repo's narrowspec
1509 1511
1510 1512 If `match` is given, then that will be intersected with the narrow
1511 1513 matcher.
1512 1514
1513 1515 If `includeexact` is True, then any exact matches from `match` will
1514 1516 be included even if they're outside the narrowspec.
1515 1517 """
1516 1518 if match:
1517 1519 if includeexact and not self._narrowmatch.always():
1518 1520 # do not exclude explicitly-specified paths so that they can
1519 1521 # be warned later on
1520 1522 em = matchmod.exact(match.files())
1521 1523 nm = matchmod.unionmatcher([self._narrowmatch, em])
1522 1524 return matchmod.intersectmatchers(match, nm)
1523 1525 return matchmod.intersectmatchers(match, self._narrowmatch)
1524 1526 return self._narrowmatch
1525 1527
1526 1528 def setnarrowpats(self, newincludes, newexcludes):
1527 1529 narrowspec.save(self, newincludes, newexcludes)
1528 1530 self.invalidate(clearfilecache=True)
1529 1531
1530 1532 @unfilteredpropertycache
1531 1533 def _quick_access_changeid_null(self):
1532 1534 return {
1533 1535 b'null': (nullrev, nullid),
1534 1536 nullrev: (nullrev, nullid),
1535 1537 nullid: (nullrev, nullid),
1536 1538 }
1537 1539
1538 1540 @unfilteredpropertycache
1539 1541 def _quick_access_changeid_wc(self):
1540 1542 # also fast path access to the working copy parents
1541 1543 # however, only do it for filter that ensure wc is visible.
1542 1544 quick = {}
1543 1545 cl = self.unfiltered().changelog
1544 1546 for node in self.dirstate.parents():
1545 1547 if node == nullid:
1546 1548 continue
1547 1549 rev = cl.index.get_rev(node)
1548 1550 if rev is None:
1549 1551 # unknown working copy parent case:
1550 1552 #
1551 1553 # skip the fast path and let higher code deal with it
1552 1554 continue
1553 1555 pair = (rev, node)
1554 1556 quick[rev] = pair
1555 1557 quick[node] = pair
1556 1558 # also add the parents of the parents
1557 1559 for r in cl.parentrevs(rev):
1558 1560 if r == nullrev:
1559 1561 continue
1560 1562 n = cl.node(r)
1561 1563 pair = (r, n)
1562 1564 quick[r] = pair
1563 1565 quick[n] = pair
1564 1566 p1node = self.dirstate.p1()
1565 1567 if p1node != nullid:
1566 1568 quick[b'.'] = quick[p1node]
1567 1569 return quick
1568 1570
1569 1571 @unfilteredmethod
1570 1572 def _quick_access_changeid_invalidate(self):
1571 1573 if '_quick_access_changeid_wc' in vars(self):
1572 1574 del self.__dict__['_quick_access_changeid_wc']
1573 1575
1574 1576 @property
1575 1577 def _quick_access_changeid(self):
1576 1578 """an helper dictionnary for __getitem__ calls
1577 1579
1578 1580 This contains the list of symbols we can recognise right away without
1579 1581 further processing.
1580 1582 """
1581 1583 mapping = self._quick_access_changeid_null
1582 1584 if self.filtername in repoview.filter_has_wc:
1583 1585 mapping = mapping.copy()
1584 1586 mapping.update(self._quick_access_changeid_wc)
1585 1587 return mapping
1586 1588
1587 1589 def __getitem__(self, changeid):
1588 1590 # dealing with special cases
1589 1591 if changeid is None:
1590 1592 return context.workingctx(self)
1591 1593 if isinstance(changeid, context.basectx):
1592 1594 return changeid
1593 1595
1594 1596 # dealing with multiple revisions
1595 1597 if isinstance(changeid, slice):
1596 1598 # wdirrev isn't contiguous so the slice shouldn't include it
1597 1599 return [
1598 1600 self[i]
1599 1601 for i in pycompat.xrange(*changeid.indices(len(self)))
1600 1602 if i not in self.changelog.filteredrevs
1601 1603 ]
1602 1604
1603 1605 # dealing with some special values
1604 1606 quick_access = self._quick_access_changeid.get(changeid)
1605 1607 if quick_access is not None:
1606 1608 rev, node = quick_access
1607 1609 return context.changectx(self, rev, node, maybe_filtered=False)
1608 1610 if changeid == b'tip':
1609 1611 node = self.changelog.tip()
1610 1612 rev = self.changelog.rev(node)
1611 1613 return context.changectx(self, rev, node)
1612 1614
1613 1615 # dealing with arbitrary values
1614 1616 try:
1615 1617 if isinstance(changeid, int):
1616 1618 node = self.changelog.node(changeid)
1617 1619 rev = changeid
1618 1620 elif changeid == b'.':
1619 1621 # this is a hack to delay/avoid loading obsmarkers
1620 1622 # when we know that '.' won't be hidden
1621 1623 node = self.dirstate.p1()
1622 1624 rev = self.unfiltered().changelog.rev(node)
1623 1625 elif len(changeid) == 20:
1624 1626 try:
1625 1627 node = changeid
1626 1628 rev = self.changelog.rev(changeid)
1627 1629 except error.FilteredLookupError:
1628 1630 changeid = hex(changeid) # for the error message
1629 1631 raise
1630 1632 except LookupError:
1631 1633 # check if it might have come from damaged dirstate
1632 1634 #
1633 1635 # XXX we could avoid the unfiltered if we had a recognizable
1634 1636 # exception for filtered changeset access
1635 1637 if (
1636 1638 self.local()
1637 1639 and changeid in self.unfiltered().dirstate.parents()
1638 1640 ):
1639 1641 msg = _(b"working directory has unknown parent '%s'!")
1640 1642 raise error.Abort(msg % short(changeid))
1641 1643 changeid = hex(changeid) # for the error message
1642 1644 raise
1643 1645
1644 1646 elif len(changeid) == 40:
1645 1647 node = bin(changeid)
1646 1648 rev = self.changelog.rev(node)
1647 1649 else:
1648 1650 raise error.ProgrammingError(
1649 1651 b"unsupported changeid '%s' of type %s"
1650 1652 % (changeid, pycompat.bytestr(type(changeid)))
1651 1653 )
1652 1654
1653 1655 return context.changectx(self, rev, node)
1654 1656
1655 1657 except (error.FilteredIndexError, error.FilteredLookupError):
1656 1658 raise error.FilteredRepoLookupError(
1657 1659 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1658 1660 )
1659 1661 except (IndexError, LookupError):
1660 1662 raise error.RepoLookupError(
1661 1663 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1662 1664 )
1663 1665 except error.WdirUnsupported:
1664 1666 return context.workingctx(self)
1665 1667
1666 1668 def __contains__(self, changeid):
1667 1669 """True if the given changeid exists
1668 1670
1669 1671 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1670 1672 is specified.
1671 1673 """
1672 1674 try:
1673 1675 self[changeid]
1674 1676 return True
1675 1677 except error.RepoLookupError:
1676 1678 return False
1677 1679
1678 1680 def __nonzero__(self):
1679 1681 return True
1680 1682
1681 1683 __bool__ = __nonzero__
1682 1684
1683 1685 def __len__(self):
1684 1686 # no need to pay the cost of repoview.changelog
1685 1687 unfi = self.unfiltered()
1686 1688 return len(unfi.changelog)
1687 1689
1688 1690 def __iter__(self):
1689 1691 return iter(self.changelog)
1690 1692
1691 1693 def revs(self, expr, *args):
1692 1694 '''Find revisions matching a revset.
1693 1695
1694 1696 The revset is specified as a string ``expr`` that may contain
1695 1697 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1696 1698
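For example (the argument names here are illustrative),
``repo.revs(b"heads(%ld)", revlist)`` would find the heads among the
integer revisions in ``revlist``; the ``l`` prefix marks a list
argument, per ``revsetlang.formatspec``.
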
1697 1699 Revset aliases from the configuration are not expanded. To expand
1698 1700 user aliases, consider calling ``scmutil.revrange()`` or
1699 1701 ``repo.anyrevs([expr], user=True)``.
1700 1702
1701 1703 Returns a smartset.abstractsmartset, which is a list-like interface
1702 1704 that contains integer revisions.
1703 1705 '''
1704 1706 tree = revsetlang.spectree(expr, *args)
1705 1707 return revset.makematcher(tree)(self)
1706 1708
1707 1709 def set(self, expr, *args):
1708 1710 '''Find revisions matching a revset and emit changectx instances.
1709 1711
1710 1712 This is a convenience wrapper around ``revs()`` that iterates the
1711 1713 result and is a generator of changectx instances.
1712 1714
1713 1715 Revset aliases from the configuration are not expanded. To expand
1714 1716 user aliases, consider calling ``scmutil.revrange()``.
1715 1717 '''
1716 1718 for r in self.revs(expr, *args):
1717 1719 yield self[r]
1718 1720
1719 1721 def anyrevs(self, specs, user=False, localalias=None):
1720 1722 '''Find revisions matching one of the given revsets.
1721 1723
1722 1724 Revset aliases from the configuration are not expanded by default. To
1723 1725 expand user aliases, specify ``user=True``. To provide some local
1724 1726 definitions overriding user aliases, set ``localalias`` to
1725 1727 ``{name: definitionstring}``.
1726 1728 '''
1727 1729 if specs == [b'null']:
1728 1730 return revset.baseset([nullrev])
1729 1731 if specs == [b'.']:
1730 1732 quick_data = self._quick_access_changeid.get(b'.')
1731 1733 if quick_data is not None:
1732 1734 return revset.baseset([quick_data[0]])
1733 1735 if user:
1734 1736 m = revset.matchany(
1735 1737 self.ui,
1736 1738 specs,
1737 1739 lookup=revset.lookupfn(self),
1738 1740 localalias=localalias,
1739 1741 )
1740 1742 else:
1741 1743 m = revset.matchany(None, specs, localalias=localalias)
1742 1744 return m(self)
1743 1745
1744 1746 def url(self):
1745 1747 return b'file:' + self.root
1746 1748
1747 1749 def hook(self, name, throw=False, **args):
1748 1750 """Call a hook, passing this repo instance.
1749 1751
1750 1752 This a convenience method to aid invoking hooks. Extensions likely
1751 1753 won't call this unless they have registered a custom hook or are
1752 1754 replacing code that is expected to call a hook.
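
For example, the transaction machinery below invokes
``self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)``
before a new transaction is opened.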
1753 1755 """
1754 1756 return hook.hook(self.ui, self, name, throw, **args)
1755 1757
1756 1758 @filteredpropertycache
1757 1759 def _tagscache(self):
1758 1760 '''Returns a tagscache object that contains various tags related
1759 1761 caches.'''
1760 1762
1761 1763 # This simplifies its cache management by having one decorated
1762 1764 # function (this one) and the rest simply fetch things from it.
1763 1765 class tagscache(object):
1764 1766 def __init__(self):
1765 1767 # These two define the set of tags for this repository. tags
1766 1768 # maps tag name to node; tagtypes maps tag name to 'global' or
1767 1769 # 'local'. (Global tags are defined by .hgtags across all
1768 1770 # heads, and local tags are defined in .hg/localtags.)
1769 1771 # They constitute the in-memory cache of tags.
1770 1772 self.tags = self.tagtypes = None
1771 1773
1772 1774 self.nodetagscache = self.tagslist = None
1773 1775
1774 1776 cache = tagscache()
1775 1777 cache.tags, cache.tagtypes = self._findtags()
1776 1778
1777 1779 return cache
1778 1780
1779 1781 def tags(self):
1780 1782 '''return a mapping of tag to node'''
1781 1783 t = {}
1782 1784 if self.changelog.filteredrevs:
1783 1785 tags, tt = self._findtags()
1784 1786 else:
1785 1787 tags = self._tagscache.tags
1786 1788 rev = self.changelog.rev
1787 1789 for k, v in pycompat.iteritems(tags):
1788 1790 try:
1789 1791 # ignore tags to unknown nodes
1790 1792 rev(v)
1791 1793 t[k] = v
1792 1794 except (error.LookupError, ValueError):
1793 1795 pass
1794 1796 return t
1795 1797
1796 1798 def _findtags(self):
1797 1799 '''Do the hard work of finding tags. Return a pair of dicts
1798 1800 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1799 1801 maps tag name to a string like \'global\' or \'local\'.
1800 1802 Subclasses or extensions are free to add their own tags, but
1801 1803 should be aware that the returned dicts will be retained for the
1802 1804 duration of the localrepo object.'''
1803 1805
1804 1806 # XXX what tagtype should subclasses/extensions use? Currently
1805 1807 # mq and bookmarks add tags, but do not set the tagtype at all.
1806 1808 # Should each extension invent its own tag type? Should there
1807 1809 # be one tagtype for all such "virtual" tags? Or is the status
1808 1810 # quo fine?
1809 1811
1810 1812 # map tag name to (node, hist)
1811 1813 alltags = tagsmod.findglobaltags(self.ui, self)
1812 1814 # map tag name to tag type
1813 1815 tagtypes = {tag: b'global' for tag in alltags}
1814 1816
1815 1817 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1816 1818
1817 1819 # Build the return dicts. Have to re-encode tag names because
1818 1820 # the tags module always uses UTF-8 (in order not to lose info
1819 1821 # writing to the cache), but the rest of Mercurial wants them in
1820 1822 # local encoding.
1821 1823 tags = {}
1822 1824 for (name, (node, hist)) in pycompat.iteritems(alltags):
1823 1825 if node != nullid:
1824 1826 tags[encoding.tolocal(name)] = node
1825 1827 tags[b'tip'] = self.changelog.tip()
1826 1828 tagtypes = {
1827 1829 encoding.tolocal(name): value
1828 1830 for (name, value) in pycompat.iteritems(tagtypes)
1829 1831 }
1830 1832 return (tags, tagtypes)
1831 1833
1832 1834 def tagtype(self, tagname):
1833 1835 '''
1834 1836 return the type of the given tag. result can be:
1835 1837
1836 1838 'local' : a local tag
1837 1839 'global' : a global tag
1838 1840 None : tag does not exist
1839 1841 '''
1840 1842
1841 1843 return self._tagscache.tagtypes.get(tagname)
1842 1844
1843 1845 def tagslist(self):
1844 1846 '''return a list of tags ordered by revision'''
1845 1847 if not self._tagscache.tagslist:
1846 1848 l = []
1847 1849 for t, n in pycompat.iteritems(self.tags()):
1848 1850 l.append((self.changelog.rev(n), t, n))
1849 1851 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1850 1852
1851 1853 return self._tagscache.tagslist
1852 1854
1853 1855 def nodetags(self, node):
1854 1856 '''return the tags associated with a node'''
1855 1857 if not self._tagscache.nodetagscache:
1856 1858 nodetagscache = {}
1857 1859 for t, n in pycompat.iteritems(self._tagscache.tags):
1858 1860 nodetagscache.setdefault(n, []).append(t)
1859 1861 for tags in pycompat.itervalues(nodetagscache):
1860 1862 tags.sort()
1861 1863 self._tagscache.nodetagscache = nodetagscache
1862 1864 return self._tagscache.nodetagscache.get(node, [])
1863 1865
1864 1866 def nodebookmarks(self, node):
1865 1867 """return the list of bookmarks pointing to the specified node"""
1866 1868 return self._bookmarks.names(node)
1867 1869
1868 1870 def branchmap(self):
1869 1871 '''returns a dictionary {branch: [branchheads]} with branchheads
1870 1872 ordered by increasing revision number'''
1871 1873 return self._branchcaches[self]
1872 1874
1873 1875 @unfilteredmethod
1874 1876 def revbranchcache(self):
1875 1877 if not self._revbranchcache:
1876 1878 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1877 1879 return self._revbranchcache
1878 1880
1879 1881 def branchtip(self, branch, ignoremissing=False):
1880 1882 '''return the tip node for a given branch
1881 1883
1882 1884 If ignoremissing is True, then this method will not raise an error.
1883 1885 This is helpful for callers that only expect None for a missing branch
1884 1886 (e.g. namespace).
1885 1887
1886 1888 '''
1887 1889 try:
1888 1890 return self.branchmap().branchtip(branch)
1889 1891 except KeyError:
1890 1892 if not ignoremissing:
1891 1893 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1892 1894 else:
1893 1895 pass
1894 1896
1895 1897 def lookup(self, key):
1896 1898 node = scmutil.revsymbol(self, key).node()
1897 1899 if node is None:
1898 1900 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1899 1901 return node
1900 1902
1901 1903 def lookupbranch(self, key):
1902 1904 if self.branchmap().hasbranch(key):
1903 1905 return key
1904 1906
1905 1907 return scmutil.revsymbol(self, key).branch()
1906 1908
1907 1909 def known(self, nodes):
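# for each node, report whether it exists in the changelog and is not
# filtered out by the current repoview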
1908 1910 cl = self.changelog
1909 1911 get_rev = cl.index.get_rev
1910 1912 filtered = cl.filteredrevs
1911 1913 result = []
1912 1914 for n in nodes:
1913 1915 r = get_rev(n)
1914 1916 resp = not (r is None or r in filtered)
1915 1917 result.append(resp)
1916 1918 return result
1917 1919
1918 1920 def local(self):
1919 1921 return self
1920 1922
1921 1923 def publishing(self):
1922 1924 # it's safe (and desirable) to trust the publish flag unconditionally
1923 1925 # so that we don't finalize changes shared between users via ssh or nfs
1924 1926 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1925 1927
1926 1928 def cancopy(self):
1927 1929 # so statichttprepo's override of local() works
1928 1930 if not self.local():
1929 1931 return False
1930 1932 if not self.publishing():
1931 1933 return True
1932 1934 # if publishing we can't copy if there is filtered content
1933 1935 return not self.filtered(b'visible').changelog.filteredrevs
1934 1936
1935 1937 def shared(self):
1936 1938 '''the type of shared repository (None if not shared)'''
1937 1939 if self.sharedpath != self.path:
1938 1940 return b'store'
1939 1941 return None
1940 1942
1941 1943 def wjoin(self, f, *insidef):
1942 1944 return self.vfs.reljoin(self.root, f, *insidef)
1943 1945
1944 1946 def setparents(self, p1, p2=nullid):
1945 1947 self[None].setparents(p1, p2)
1946 1948 self._quick_access_changeid_invalidate()
1947 1949
1948 1950 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1949 1951 """changeid must be a changeset revision, if specified.
1950 1952 fileid can be a file revision or node."""
1951 1953 return context.filectx(
1952 1954 self, path, changeid, fileid, changectx=changectx
1953 1955 )
1954 1956
1955 1957 def getcwd(self):
1956 1958 return self.dirstate.getcwd()
1957 1959
1958 1960 def pathto(self, f, cwd=None):
1959 1961 return self.dirstate.pathto(f, cwd)
1960 1962
1961 1963 def _loadfilter(self, filter):
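# ``filter`` names a config section (b'encode' or b'decode'; see the
# property caches further below).  Each entry maps a file pattern to
# either a shell command or a data filter registered via
# ``adddatafilter``; a value of '!' disables the pattern.  A
# hypothetical hgrc snippet (example only) could look like::
#
#   [encode]
#   **.txt = dos2unix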
1962 1964 if filter not in self._filterpats:
1963 1965 l = []
1964 1966 for pat, cmd in self.ui.configitems(filter):
1965 1967 if cmd == b'!':
1966 1968 continue
1967 1969 mf = matchmod.match(self.root, b'', [pat])
1968 1970 fn = None
1969 1971 params = cmd
1970 1972 for name, filterfn in pycompat.iteritems(self._datafilters):
1971 1973 if cmd.startswith(name):
1972 1974 fn = filterfn
1973 1975 params = cmd[len(name) :].lstrip()
1974 1976 break
1975 1977 if not fn:
1976 1978 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1977 1979 fn.__name__ = 'commandfilter'
1978 1980 # Wrap old filters not supporting keyword arguments
1979 1981 if not pycompat.getargspec(fn)[2]:
1980 1982 oldfn = fn
1981 1983 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1982 1984 fn.__name__ = 'compat-' + oldfn.__name__
1983 1985 l.append((mf, fn, params))
1984 1986 self._filterpats[filter] = l
1985 1987 return self._filterpats[filter]
1986 1988
1987 1989 def _filter(self, filterpats, filename, data):
1988 1990 for mf, fn, cmd in filterpats:
1989 1991 if mf(filename):
1990 1992 self.ui.debug(
1991 1993 b"filtering %s through %s\n"
1992 1994 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1993 1995 )
1994 1996 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1995 1997 break
1996 1998
1997 1999 return data
1998 2000
1999 2001 @unfilteredpropertycache
2000 2002 def _encodefilterpats(self):
2001 2003 return self._loadfilter(b'encode')
2002 2004
2003 2005 @unfilteredpropertycache
2004 2006 def _decodefilterpats(self):
2005 2007 return self._loadfilter(b'decode')
2006 2008
2007 2009 def adddatafilter(self, name, filter):
2008 2010 self._datafilters[name] = filter
2009 2011
2010 2012 def wread(self, filename):
2011 2013 if self.wvfs.islink(filename):
2012 2014 data = self.wvfs.readlink(filename)
2013 2015 else:
2014 2016 data = self.wvfs.read(filename)
2015 2017 return self._filter(self._encodefilterpats, filename, data)
2016 2018
2017 2019 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2018 2020 """write ``data`` into ``filename`` in the working directory
2019 2021
2020 2022 This returns the length of the written (possibly decoded) data.
2021 2023 """
2022 2024 data = self._filter(self._decodefilterpats, filename, data)
2023 2025 if b'l' in flags:
2024 2026 self.wvfs.symlink(data, filename)
2025 2027 else:
2026 2028 self.wvfs.write(
2027 2029 filename, data, backgroundclose=backgroundclose, **kwargs
2028 2030 )
2029 2031 if b'x' in flags:
2030 2032 self.wvfs.setflags(filename, False, True)
2031 2033 else:
2032 2034 self.wvfs.setflags(filename, False, False)
2033 2035 return len(data)
2034 2036
2035 2037 def wwritedata(self, filename, data):
2036 2038 return self._filter(self._decodefilterpats, filename, data)
2037 2039
2038 2040 def currenttransaction(self):
2039 2041 """return the current transaction or None if non exists"""
2040 2042 if self._transref:
2041 2043 tr = self._transref()
2042 2044 else:
2043 2045 tr = None
2044 2046
2045 2047 if tr and tr.running():
2046 2048 return tr
2047 2049 return None
2048 2050
2049 2051 def transaction(self, desc, report=None):
2050 2052 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2051 2053 b'devel', b'check-locks'
2052 2054 ):
2053 2055 if self._currentlock(self._lockref) is None:
2054 2056 raise error.ProgrammingError(b'transaction requires locking')
2055 2057 tr = self.currenttransaction()
2056 2058 if tr is not None:
2057 2059 return tr.nest(name=desc)
2058 2060
2059 2061 # abort here if the journal already exists
2060 2062 if self.svfs.exists(b"journal"):
2061 2063 raise error.RepoError(
2062 2064 _(b"abandoned transaction found"),
2063 2065 hint=_(b"run 'hg recover' to clean up transaction"),
2064 2066 )
2065 2067
2066 2068 idbase = b"%.40f#%f" % (random.random(), time.time())
2067 2069 ha = hex(hashutil.sha1(idbase).digest())
2068 2070 txnid = b'TXN:' + ha
2069 2071 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2070 2072
2071 2073 self._writejournal(desc)
2072 2074 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2073 2075 if report:
2074 2076 rp = report
2075 2077 else:
2076 2078 rp = self.ui.warn
2077 2079 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2078 2080 # we must avoid cyclic reference between repo and transaction.
2079 2081 reporef = weakref.ref(self)
2080 2082 # Code to track tag movement
2081 2083 #
2082 2084 # Since tags are all handled as file content, it is actually quite hard
2083 2085 # to track these movements from a code perspective. So we fall back to
2084 2086 # tracking at the repository level. One could envision tracking changes
2085 2087 # to the '.hgtags' file through changegroup application, but that fails to
2086 2088 # cope with cases where a transaction exposes new heads without a changegroup
2087 2089 # being involved (eg: phase movement).
2088 2090 #
2089 2091 # For now, we gate the feature behind a flag since this likely comes
2090 2092 # with performance impacts. The current code runs more often than needed
2091 2093 # and does not use caches as much as it could. The current focus is on
2092 2094 # the behavior of the feature so we disable it by default. The flag
2093 2095 # will be removed when we are happy with the performance impact.
2094 2096 #
2095 2097 # Once this feature is no longer experimental move the following
2096 2098 # documentation to the appropriate help section:
2097 2099 #
2098 2100 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2099 2101 # tags (new or changed or deleted tags). In addition the details of
2100 2102 # these changes are made available in a file at:
2101 2103 # ``REPOROOT/.hg/changes/tags.changes``.
2102 2104 # Make sure you check for HG_TAG_MOVED before reading that file as it
2103 2105 # might exist from a previous transaction even if no tags were touched
2104 2106 # in this one. Changes are recorded in a line-based format::
2105 2107 #
2106 2108 # <action> <hex-node> <tag-name>\n
2107 2109 #
2108 2110 # Actions are defined as follows:
2109 2111 # "-R": tag is removed,
2110 2112 # "+A": tag is added,
2111 2113 # "-M": tag is moved (old value),
2112 2114 # "+M": tag is moved (new value),
2113 2115 tracktags = lambda x: None
2114 2116 # experimental config: experimental.hook-track-tags
2115 2117 shouldtracktags = self.ui.configbool(
2116 2118 b'experimental', b'hook-track-tags'
2117 2119 )
2118 2120 if desc != b'strip' and shouldtracktags:
2119 2121 oldheads = self.changelog.headrevs()
2120 2122
2121 2123 def tracktags(tr2):
2122 2124 repo = reporef()
2123 2125 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2124 2126 newheads = repo.changelog.headrevs()
2125 2127 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2126 2128 # note: we compare lists here.
2127 2129 # As we do it only once, building a set would not be cheaper
2128 2130 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2129 2131 if changes:
2130 2132 tr2.hookargs[b'tag_moved'] = b'1'
2131 2133 with repo.vfs(
2132 2134 b'changes/tags.changes', b'w', atomictemp=True
2133 2135 ) as changesfile:
2134 2136 # note: we do not register the file with the transaction
2135 2137 # because we need it to still exist when the transaction
2136 2138 # is closed (for txnclose hooks)
2137 2139 tagsmod.writediff(changesfile, changes)
2138 2140
2139 2141 def validate(tr2):
2140 2142 """will run pre-closing hooks"""
2141 2143 # XXX the transaction API is a bit lacking here so we take a hacky
2142 2144 # path for now
2143 2145 #
2144 2146 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2145 2147 # dict is copied before these run. In addition we need the data
2146 2148 # available to in-memory hooks too.
2147 2149 #
2148 2150 # Moreover, we also need to make sure this runs before txnclose
2149 2151 # hooks and there is no "pending" mechanism that would execute
2150 2152 # logic only if hooks are about to run.
2151 2153 #
2152 2154 # Fixing this limitation of the transaction is also needed to track
2153 2155 # other families of changes (bookmarks, phases, obsolescence).
2154 2156 #
2155 2157 # This will have to be fixed before we remove the experimental
2156 2158 # gating.
2157 2159 tracktags(tr2)
2158 2160 repo = reporef()
2159 2161
2160 2162 singleheadopt = (b'experimental', b'single-head-per-branch')
2161 2163 singlehead = repo.ui.configbool(*singleheadopt)
2162 2164 if singlehead:
2163 2165 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2164 2166 accountclosed = singleheadsub.get(
2165 2167 b"account-closed-heads", False
2166 2168 )
2167 2169 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2168 2170 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2169 2171 for name, (old, new) in sorted(
2170 2172 tr.changes[b'bookmarks'].items()
2171 2173 ):
2172 2174 args = tr.hookargs.copy()
2173 2175 args.update(bookmarks.preparehookargs(name, old, new))
2174 2176 repo.hook(
2175 2177 b'pretxnclose-bookmark',
2176 2178 throw=True,
2177 2179 **pycompat.strkwargs(args)
2178 2180 )
2179 2181 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2180 2182 cl = repo.unfiltered().changelog
2181 2183 for revs, (old, new) in tr.changes[b'phases']:
2182 2184 for rev in revs:
2183 2185 args = tr.hookargs.copy()
2184 2186 node = hex(cl.node(rev))
2185 2187 args.update(phases.preparehookargs(node, old, new))
2186 2188 repo.hook(
2187 2189 b'pretxnclose-phase',
2188 2190 throw=True,
2189 2191 **pycompat.strkwargs(args)
2190 2192 )
2191 2193
2192 2194 repo.hook(
2193 2195 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2194 2196 )
2195 2197
2196 2198 def releasefn(tr, success):
2197 2199 repo = reporef()
2198 2200 if repo is None:
2199 2201 # If the repo has been GC'd (and this release function is being
2200 2202 # called from transaction.__del__), there's not much we can do,
2201 2203 # so just leave the unfinished transaction there and let the
2202 2204 # user run `hg recover`.
2203 2205 return
2204 2206 if success:
2205 2207 # this should be explicitly invoked here, because
2206 2208 # in-memory changes aren't written out when the
2207 2209 # transaction closes, if tr.addfilegenerator (via
2208 2210 # dirstate.write or so) wasn't invoked while the
2209 2211 # transaction was running
2210 2212 repo.dirstate.write(None)
2211 2213 else:
2212 2214 # discard all changes (including ones already written
2213 2215 # out) in this transaction
2214 2216 narrowspec.restorebackup(self, b'journal.narrowspec')
2215 2217 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2216 2218 repo.dirstate.restorebackup(None, b'journal.dirstate')
2217 2219
2218 2220 repo.invalidate(clearfilecache=True)
2219 2221
2220 2222 tr = transaction.transaction(
2221 2223 rp,
2222 2224 self.svfs,
2223 2225 vfsmap,
2224 2226 b"journal",
2225 2227 b"undo",
2226 2228 aftertrans(renames),
2227 2229 self.store.createmode,
2228 2230 validator=validate,
2229 2231 releasefn=releasefn,
2230 2232 checkambigfiles=_cachedfiles,
2231 2233 name=desc,
2232 2234 )
2233 2235 tr.changes[b'origrepolen'] = len(self)
2234 2236 tr.changes[b'obsmarkers'] = set()
2235 2237 tr.changes[b'phases'] = []
2236 2238 tr.changes[b'bookmarks'] = {}
2237 2239
2238 2240 tr.hookargs[b'txnid'] = txnid
2239 2241 tr.hookargs[b'txnname'] = desc
2240 2242 # note: writing the fncache only during finalize means that the file is
2241 2243 # outdated when running hooks. As fncache is used for streaming clones,
2242 2244 # this is not expected to break anything that happens during the hooks.
2243 2245 tr.addfinalize(b'flush-fncache', self.store.write)
2244 2246
2245 2247 def txnclosehook(tr2):
2246 2248 """To be run if transaction is successful, will schedule a hook run
2247 2249 """
2248 2250 # Don't reference tr2 in hook() so we don't hold a reference.
2249 2251 # This reduces memory consumption when there are multiple
2250 2252 # transactions per lock. This can likely go away if issue5045
2251 2253 # fixes the function accumulation.
2252 2254 hookargs = tr2.hookargs
2253 2255
2254 2256 def hookfunc(unused_success):
2255 2257 repo = reporef()
2256 2258 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2257 2259 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2258 2260 for name, (old, new) in bmchanges:
2259 2261 args = tr.hookargs.copy()
2260 2262 args.update(bookmarks.preparehookargs(name, old, new))
2261 2263 repo.hook(
2262 2264 b'txnclose-bookmark',
2263 2265 throw=False,
2264 2266 **pycompat.strkwargs(args)
2265 2267 )
2266 2268
2267 2269 if hook.hashook(repo.ui, b'txnclose-phase'):
2268 2270 cl = repo.unfiltered().changelog
2269 2271 phasemv = sorted(
2270 2272 tr.changes[b'phases'], key=lambda r: r[0][0]
2271 2273 )
2272 2274 for revs, (old, new) in phasemv:
2273 2275 for rev in revs:
2274 2276 args = tr.hookargs.copy()
2275 2277 node = hex(cl.node(rev))
2276 2278 args.update(phases.preparehookargs(node, old, new))
2277 2279 repo.hook(
2278 2280 b'txnclose-phase',
2279 2281 throw=False,
2280 2282 **pycompat.strkwargs(args)
2281 2283 )
2282 2284
2283 2285 repo.hook(
2284 2286 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2285 2287 )
2286 2288
2287 2289 reporef()._afterlock(hookfunc)
2288 2290
2289 2291 tr.addfinalize(b'txnclose-hook', txnclosehook)
2290 2292 # Include a leading "-" to make it happen before the transaction summary
2291 2293 # reports registered via scmutil.registersummarycallback() whose names
2292 2294 # are 00-txnreport etc. That way, the caches will be warm when the
2293 2295 # callbacks run.
2294 2296 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2295 2297
2296 2298 def txnaborthook(tr2):
2297 2299 """To be run if transaction is aborted
2298 2300 """
2299 2301 reporef().hook(
2300 2302 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2301 2303 )
2302 2304
2303 2305 tr.addabort(b'txnabort-hook', txnaborthook)
2304 2306 # avoid eager cache invalidation. in-memory data should be identical
2305 2307 # to stored data if transaction has no error.
2306 2308 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2307 2309 self._transref = weakref.ref(tr)
2308 2310 scmutil.registersummarycallback(self, tr, desc)
2309 2311 return tr
2310 2312
2311 2313 def _journalfiles(self):
2312 2314 return (
2313 2315 (self.svfs, b'journal'),
2314 2316 (self.svfs, b'journal.narrowspec'),
2315 2317 (self.vfs, b'journal.narrowspec.dirstate'),
2316 2318 (self.vfs, b'journal.dirstate'),
2317 2319 (self.vfs, b'journal.branch'),
2318 2320 (self.vfs, b'journal.desc'),
2319 2321 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2320 2322 (self.svfs, b'journal.phaseroots'),
2321 2323 )
2322 2324
2323 2325 def undofiles(self):
2324 2326 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2325 2327
2326 2328 @unfilteredmethod
2327 2329 def _writejournal(self, desc):
2328 2330 self.dirstate.savebackup(None, b'journal.dirstate')
2329 2331 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2330 2332 narrowspec.savebackup(self, b'journal.narrowspec')
2331 2333 self.vfs.write(
2332 2334 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2333 2335 )
2334 2336 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2335 2337 bookmarksvfs = bookmarks.bookmarksvfs(self)
2336 2338 bookmarksvfs.write(
2337 2339 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2338 2340 )
2339 2341 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2340 2342
2341 2343 def recover(self):
2342 2344 with self.lock():
2343 2345 if self.svfs.exists(b"journal"):
2344 2346 self.ui.status(_(b"rolling back interrupted transaction\n"))
2345 2347 vfsmap = {
2346 2348 b'': self.svfs,
2347 2349 b'plain': self.vfs,
2348 2350 }
2349 2351 transaction.rollback(
2350 2352 self.svfs,
2351 2353 vfsmap,
2352 2354 b"journal",
2353 2355 self.ui.warn,
2354 2356 checkambigfiles=_cachedfiles,
2355 2357 )
2356 2358 self.invalidate()
2357 2359 return True
2358 2360 else:
2359 2361 self.ui.warn(_(b"no interrupted transaction available\n"))
2360 2362 return False
2361 2363
2362 2364 def rollback(self, dryrun=False, force=False):
2363 2365 wlock = lock = dsguard = None
2364 2366 try:
2365 2367 wlock = self.wlock()
2366 2368 lock = self.lock()
2367 2369 if self.svfs.exists(b"undo"):
2368 2370 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2369 2371
2370 2372 return self._rollback(dryrun, force, dsguard)
2371 2373 else:
2372 2374 self.ui.warn(_(b"no rollback information available\n"))
2373 2375 return 1
2374 2376 finally:
2375 2377 release(dsguard, lock, wlock)
2376 2378
2377 2379 @unfilteredmethod # Until we get smarter cache management
2378 2380 def _rollback(self, dryrun, force, dsguard):
2379 2381 ui = self.ui
2380 2382 try:
2381 2383 args = self.vfs.read(b'undo.desc').splitlines()
2382 2384 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2383 2385 if len(args) >= 3:
2384 2386 detail = args[2]
2385 2387 oldtip = oldlen - 1
2386 2388
2387 2389 if detail and ui.verbose:
2388 2390 msg = _(
2389 2391 b'repository tip rolled back to revision %d'
2390 2392 b' (undo %s: %s)\n'
2391 2393 ) % (oldtip, desc, detail)
2392 2394 else:
2393 2395 msg = _(
2394 2396 b'repository tip rolled back to revision %d (undo %s)\n'
2395 2397 ) % (oldtip, desc)
2396 2398 except IOError:
2397 2399 msg = _(b'rolling back unknown transaction\n')
2398 2400 desc = None
2399 2401
2400 2402 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2401 2403 raise error.Abort(
2402 2404 _(
2403 2405 b'rollback of last commit while not checked out '
2404 2406 b'may lose data'
2405 2407 ),
2406 2408 hint=_(b'use -f to force'),
2407 2409 )
2408 2410
2409 2411 ui.status(msg)
2410 2412 if dryrun:
2411 2413 return 0
2412 2414
2413 2415 parents = self.dirstate.parents()
2414 2416 self.destroying()
2415 2417 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2416 2418 transaction.rollback(
2417 2419 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2418 2420 )
2419 2421 bookmarksvfs = bookmarks.bookmarksvfs(self)
2420 2422 if bookmarksvfs.exists(b'undo.bookmarks'):
2421 2423 bookmarksvfs.rename(
2422 2424 b'undo.bookmarks', b'bookmarks', checkambig=True
2423 2425 )
2424 2426 if self.svfs.exists(b'undo.phaseroots'):
2425 2427 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2426 2428 self.invalidate()
2427 2429
2428 2430 has_node = self.changelog.index.has_node
2429 2431 parentgone = any(not has_node(p) for p in parents)
2430 2432 if parentgone:
2431 2433 # prevent dirstateguard from overwriting already restored one
2432 2434 dsguard.close()
2433 2435
2434 2436 narrowspec.restorebackup(self, b'undo.narrowspec')
2435 2437 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2436 2438 self.dirstate.restorebackup(None, b'undo.dirstate')
2437 2439 try:
2438 2440 branch = self.vfs.read(b'undo.branch')
2439 2441 self.dirstate.setbranch(encoding.tolocal(branch))
2440 2442 except IOError:
2441 2443 ui.warn(
2442 2444 _(
2443 2445 b'named branch could not be reset: '
2444 2446 b'current branch is still \'%s\'\n'
2445 2447 )
2446 2448 % self.dirstate.branch()
2447 2449 )
2448 2450
2449 2451 parents = tuple([p.rev() for p in self[None].parents()])
2450 2452 if len(parents) > 1:
2451 2453 ui.status(
2452 2454 _(
2453 2455 b'working directory now based on '
2454 2456 b'revisions %d and %d\n'
2455 2457 )
2456 2458 % parents
2457 2459 )
2458 2460 else:
2459 2461 ui.status(
2460 2462 _(b'working directory now based on revision %d\n') % parents
2461 2463 )
2462 2464 mergemod.mergestate.clean(self, self[b'.'].node())
2463 2465
2464 2466 # TODO: if we know which new heads may result from this rollback, pass
2465 2467 # them to destroy(), which will prevent the branchhead cache from being
2466 2468 # invalidated.
2467 2469 self.destroyed()
2468 2470 return 0
2469 2471
2470 2472 def _buildcacheupdater(self, newtransaction):
2471 2473 """called during transaction to build the callback updating cache
2472 2474
2473 2475 Lives on the repository to help extension who might want to augment
2474 2476 this logic. For this purpose, the created transaction is passed to the
2475 2477 method.
2476 2478 """
2477 2479 # we must avoid cyclic reference between repo and transaction.
2478 2480 reporef = weakref.ref(self)
2479 2481
2480 2482 def updater(tr):
2481 2483 repo = reporef()
2482 2484 repo.updatecaches(tr)
2483 2485
2484 2486 return updater
2485 2487
2486 2488 @unfilteredmethod
2487 2489 def updatecaches(self, tr=None, full=False):
2488 2490 """warm appropriate caches
2489 2491
2490 2492 If this function is called after a transaction has closed, the transaction
2491 2493 will be available in the 'tr' argument. This can be used to selectively
2492 2494 update caches relevant to the changes in that transaction.
2493 2495
2494 2496 If 'full' is set, make sure all caches the function knows about have
2495 2497 up-to-date data. Even the ones usually loaded more lazily.
2496 2498 """
2497 2499 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2498 2500 # During strip, many caches are invalid but
2499 2501 # later call to `destroyed` will refresh them.
2500 2502 return
2501 2503
2502 2504 if tr is None or tr.changes[b'origrepolen'] < len(self):
2503 2505 # accessing the 'served' branchmap should refresh all the others,
2504 2506 self.ui.debug(b'updating the branch cache\n')
2505 2507 self.filtered(b'served').branchmap()
2506 2508 self.filtered(b'served.hidden').branchmap()
2507 2509
2508 2510 if full:
2509 2511 unfi = self.unfiltered()
2510 2512
2511 2513 self.changelog.update_caches(transaction=tr)
2512 2514
2513 2515 rbc = unfi.revbranchcache()
2514 2516 for r in unfi.changelog:
2515 2517 rbc.branchinfo(r)
2516 2518 rbc.write()
2517 2519
2518 2520 # ensure the working copy parents are in the manifestfulltextcache
2519 2521 for ctx in self[b'.'].parents():
2520 2522 ctx.manifest() # accessing the manifest is enough
2521 2523
2522 2524 # accessing fnode cache warms the cache
2523 2525 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2524 2526 # accessing tags warms the cache
2525 2527 self.tags()
2526 2528 self.filtered(b'served').tags()
2527 2529
2528 2530 # The `full` arg is documented as updating even the lazily-loaded
2529 2531 # caches immediately, so we're forcing a write to cause these caches
2530 2532 # to be warmed up even if they haven't explicitly been requested
2531 2533 # yet (if they've never been used by hg, they won't ever have been
2532 2534 # written, even if they're a subset of another kind of cache that
2533 2535 # *has* been used).
2534 2536 for filt in repoview.filtertable.keys():
2535 2537 filtered = self.filtered(filt)
2536 2538 filtered.branchmap().write(filtered)
2537 2539
2538 2540 def invalidatecaches(self):
2539 2541
2540 2542 if '_tagscache' in vars(self):
2541 2543 # can't use delattr on proxy
2542 2544 del self.__dict__['_tagscache']
2543 2545
2544 2546 self._branchcaches.clear()
2545 2547 self.invalidatevolatilesets()
2546 2548 self._sparsesignaturecache.clear()
2547 2549
2548 2550 def invalidatevolatilesets(self):
2549 2551 self.filteredrevcache.clear()
2550 2552 obsolete.clearobscaches(self)
2551 2553 self._quick_access_changeid_invalidate()
2552 2554
2553 2555 def invalidatedirstate(self):
2554 2556 '''Invalidates the dirstate, causing the next call to dirstate
2555 2557 to check if it was modified since the last time it was read,
2556 2558 rereading it if it has.
2557 2559
2558 2560 This is different from dirstate.invalidate() in that it doesn't always
2559 2561 reread the dirstate. Use dirstate.invalidate() if you want to
2560 2562 explicitly read the dirstate again (i.e. restoring it to a previous
2561 2563 known good state).'''
2562 2564 if hasunfilteredcache(self, 'dirstate'):
2563 2565 for k in self.dirstate._filecache:
2564 2566 try:
2565 2567 delattr(self.dirstate, k)
2566 2568 except AttributeError:
2567 2569 pass
2568 2570 delattr(self.unfiltered(), 'dirstate')
2569 2571
2570 2572 def invalidate(self, clearfilecache=False):
2571 2573 '''Invalidates both store and non-store parts other than dirstate
2572 2574
2573 2575 If a transaction is running, invalidation of store is omitted,
2574 2576 because discarding in-memory changes might cause inconsistency
2575 2577 (e.g. incomplete fncache causes unintentional failure, but
2576 2578 redundant one doesn't).
2577 2579 '''
2578 2580 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2579 2581 for k in list(self._filecache.keys()):
2580 2582 # dirstate is invalidated separately in invalidatedirstate()
2581 2583 if k == b'dirstate':
2582 2584 continue
2583 2585 if (
2584 2586 k == b'changelog'
2585 2587 and self.currenttransaction()
2586 2588 and self.changelog._delayed
2587 2589 ):
2588 2590 # The changelog object may store unwritten revisions. We don't
2589 2591 # want to lose them.
2590 2592 # TODO: Solve the problem instead of working around it.
2591 2593 continue
2592 2594
2593 2595 if clearfilecache:
2594 2596 del self._filecache[k]
2595 2597 try:
2596 2598 delattr(unfiltered, k)
2597 2599 except AttributeError:
2598 2600 pass
2599 2601 self.invalidatecaches()
2600 2602 if not self.currenttransaction():
2601 2603 # TODO: Changing contents of store outside transaction
2602 2604 # causes inconsistency. We should make in-memory store
2603 2605 # changes detectable, and abort if changed.
2604 2606 self.store.invalidatecaches()
2605 2607
2606 2608 def invalidateall(self):
2607 2609 '''Fully invalidates both store and non-store parts, causing the
2608 2610 subsequent operation to reread any outside changes.'''
2609 2611 # extension should hook this to invalidate its caches
2610 2612 self.invalidate()
2611 2613 self.invalidatedirstate()
2612 2614
2613 2615 @unfilteredmethod
2614 2616 def _refreshfilecachestats(self, tr):
2615 2617 """Reload stats of cached files so that they are flagged as valid"""
2616 2618 for k, ce in self._filecache.items():
2617 2619 k = pycompat.sysstr(k)
2618 2620 if k == 'dirstate' or k not in self.__dict__:
2619 2621 continue
2620 2622 ce.refresh()
2621 2623
2622 2624 def _lock(
2623 2625 self,
2624 2626 vfs,
2625 2627 lockname,
2626 2628 wait,
2627 2629 releasefn,
2628 2630 acquirefn,
2629 2631 desc,
2630 2632 inheritchecker=None,
2631 2633 parentenvvar=None,
2632 2634 ):
2633 2635 parentlock = None
2634 2636 # the contents of parentenvvar are used by the underlying lock to
2635 2637 # determine whether it can be inherited
2636 2638 if parentenvvar is not None:
2637 2639 parentlock = encoding.environ.get(parentenvvar)
2638 2640
2639 2641 timeout = 0
2640 2642 warntimeout = 0
2641 2643 if wait:
2642 2644 timeout = self.ui.configint(b"ui", b"timeout")
2643 2645 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2644 2646 # internal config: ui.signal-safe-lock
2645 2647 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2646 2648
2647 2649 l = lockmod.trylock(
2648 2650 self.ui,
2649 2651 vfs,
2650 2652 lockname,
2651 2653 timeout,
2652 2654 warntimeout,
2653 2655 releasefn=releasefn,
2654 2656 acquirefn=acquirefn,
2655 2657 desc=desc,
2656 2658 inheritchecker=inheritchecker,
2657 2659 parentlock=parentlock,
2658 2660 signalsafe=signalsafe,
2659 2661 )
2660 2662 return l
2661 2663
2662 2664 def _afterlock(self, callback):
2663 2665 """add a callback to be run when the repository is fully unlocked
2664 2666
2665 2667 The callback will be executed when the outermost lock is released
2666 2668 (with wlock being higher level than 'lock')."""
2667 2669 for ref in (self._wlockref, self._lockref):
2668 2670 l = ref and ref()
2669 2671 if l and l.held:
2670 2672 l.postrelease.append(callback)
2671 2673 break
2672 2674 else: # no lock has been found.
2673 2675 callback(True)
2674 2676
2675 2677 def lock(self, wait=True):
2676 2678 '''Lock the repository store (.hg/store) and return a weak reference
2677 2679 to the lock. Use this before modifying the store (e.g. committing or
2678 2680 stripping). If you are opening a transaction, get a lock as well.
2679 2681
2680 2682 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2681 2683 'wlock' first to avoid a dead-lock hazard.'''
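# A typical calling pattern (a sketch, assuming the context-manager
# usage of locks and transactions seen elsewhere in Mercurial; the
# transaction description is a placeholder)::
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction(b'my-operation'):
#           ...  # modify the store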
2682 2684 l = self._currentlock(self._lockref)
2683 2685 if l is not None:
2684 2686 l.lock()
2685 2687 return l
2686 2688
2687 2689 l = self._lock(
2688 2690 vfs=self.svfs,
2689 2691 lockname=b"lock",
2690 2692 wait=wait,
2691 2693 releasefn=None,
2692 2694 acquirefn=self.invalidate,
2693 2695 desc=_(b'repository %s') % self.origroot,
2694 2696 )
2695 2697 self._lockref = weakref.ref(l)
2696 2698 return l
2697 2699
2698 2700 def _wlockchecktransaction(self):
2699 2701 if self.currenttransaction() is not None:
2700 2702 raise error.LockInheritanceContractViolation(
2701 2703 b'wlock cannot be inherited in the middle of a transaction'
2702 2704 )
2703 2705
2704 2706 def wlock(self, wait=True):
2705 2707 '''Lock the non-store parts of the repository (everything under
2706 2708 .hg except .hg/store) and return a weak reference to the lock.
2707 2709
2708 2710 Use this before modifying files in .hg.
2709 2711
2710 2712 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2711 2713 'wlock' first to avoid a dead-lock hazard.'''
2712 2714 l = self._wlockref and self._wlockref()
2713 2715 if l is not None and l.held:
2714 2716 l.lock()
2715 2717 return l
2716 2718
2717 2719 # We do not need to check for non-waiting lock acquisition. Such
2718 2720 # acquisition would not cause a dead-lock as it would just fail.
2719 2721 if wait and (
2720 2722 self.ui.configbool(b'devel', b'all-warnings')
2721 2723 or self.ui.configbool(b'devel', b'check-locks')
2722 2724 ):
2723 2725 if self._currentlock(self._lockref) is not None:
2724 2726 self.ui.develwarn(b'"wlock" acquired after "lock"')
2725 2727
2726 2728 def unlock():
2727 2729 if self.dirstate.pendingparentchange():
2728 2730 self.dirstate.invalidate()
2729 2731 else:
2730 2732 self.dirstate.write(None)
2731 2733
2732 2734 self._filecache[b'dirstate'].refresh()
2733 2735
2734 2736 l = self._lock(
2735 2737 self.vfs,
2736 2738 b"wlock",
2737 2739 wait,
2738 2740 unlock,
2739 2741 self.invalidatedirstate,
2740 2742 _(b'working directory of %s') % self.origroot,
2741 2743 inheritchecker=self._wlockchecktransaction,
2742 2744 parentenvvar=b'HG_WLOCK_LOCKER',
2743 2745 )
2744 2746 self._wlockref = weakref.ref(l)
2745 2747 return l
2746 2748
2747 2749 def _currentlock(self, lockref):
2748 2750 """Returns the lock if it's held, or None if it's not."""
2749 2751 if lockref is None:
2750 2752 return None
2751 2753 l = lockref()
2752 2754 if l is None or not l.held:
2753 2755 return None
2754 2756 return l
2755 2757
2756 2758 def currentwlock(self):
2757 2759 """Returns the wlock if it's held, or None if it's not."""
2758 2760 return self._currentlock(self._wlockref)
2759 2761
2760 2762 def _filecommit(
2761 2763 self,
2762 2764 fctx,
2763 2765 manifest1,
2764 2766 manifest2,
2765 2767 linkrev,
2766 2768 tr,
2767 2769 changelist,
2768 2770 includecopymeta,
2769 2771 ):
2770 2772 """
2771 2773 commit an individual file as part of a larger transaction
2772 2774 """
2773 2775
2774 2776 fname = fctx.path()
2775 2777 fparent1 = manifest1.get(fname, nullid)
2776 2778 fparent2 = manifest2.get(fname, nullid)
2777 2779 if isinstance(fctx, context.filectx):
2778 2780 node = fctx.filenode()
2779 2781 if node in [fparent1, fparent2]:
2780 2782 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2781 2783 if (
2782 2784 fparent1 != nullid
2783 2785 and manifest1.flags(fname) != fctx.flags()
2784 2786 ) or (
2785 2787 fparent2 != nullid
2786 2788 and manifest2.flags(fname) != fctx.flags()
2787 2789 ):
2788 2790 changelist.append(fname)
2789 2791 return node
2790 2792
2791 2793 flog = self.file(fname)
2792 2794 meta = {}
2793 2795 cfname = fctx.copysource()
2794 2796 if cfname and cfname != fname:
2795 2797 # Mark the new revision of this file as a copy of another
2796 2798 # file. This copy data will effectively act as a parent
2797 2799 # of this new revision. If this is a merge, the first
2798 2800 # parent will be the nullid (meaning "look up the copy data")
2799 2801 # and the second one will be the other parent. For example:
2800 2802 #
2801 2803 # 0 --- 1 --- 3 rev1 changes file foo
2802 2804 # \ / rev2 renames foo to bar and changes it
2803 2805 # \- 2 -/ rev3 should have bar with all changes and
2804 2806 # should record that bar descends from
2805 2807 # bar in rev2 and foo in rev1
2806 2808 #
2807 2809 # this allows this merge to succeed:
2808 2810 #
2809 2811 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2810 2812 # \ / merging rev3 and rev4 should use bar@rev2
2811 2813 # \- 2 --- 4 as the merge base
2812 2814 #
2813 2815
2814 2816 cnode = manifest1.get(cfname)
2815 2817 newfparent = fparent2
2816 2818
2817 2819 if manifest2: # branch merge
2818 2820 if fparent2 == nullid or cnode is None: # copied on remote side
2819 2821 if cfname in manifest2:
2820 2822 cnode = manifest2[cfname]
2821 2823 newfparent = fparent1
2822 2824
2823 2825 # Here, we used to search backwards through history to try to find
2824 2826 # where the file copy came from if the source of a copy was not in
2825 2827 # the parent directory. However, this doesn't actually make sense to
2826 2828 # do (what does a copy from something not in your working copy even
2827 2829 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2828 2830 # the user that copy information was dropped, so if they didn't
2829 2831 # expect this outcome it can be fixed, but this is the correct
2830 2832 # behavior in this circumstance.
2831 2833
2832 2834 if cnode:
2833 2835 self.ui.debug(
2834 2836 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2835 2837 )
2836 2838 if includecopymeta:
2837 2839 meta[b"copy"] = cfname
2838 2840 meta[b"copyrev"] = hex(cnode)
2839 2841 fparent1, fparent2 = nullid, newfparent
2840 2842 else:
2841 2843 self.ui.warn(
2842 2844 _(
2843 2845 b"warning: can't find ancestor for '%s' "
2844 2846 b"copied from '%s'!\n"
2845 2847 )
2846 2848 % (fname, cfname)
2847 2849 )
2848 2850
2849 2851 elif fparent1 == nullid:
2850 2852 fparent1, fparent2 = fparent2, nullid
2851 2853 elif fparent2 != nullid:
2852 2854 # is one parent an ancestor of the other?
2853 2855 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2854 2856 if fparent1 in fparentancestors:
2855 2857 fparent1, fparent2 = fparent2, nullid
2856 2858 elif fparent2 in fparentancestors:
2857 2859 fparent2 = nullid
2858 2860 elif not fparentancestors:
2859 2861 # TODO: this whole if-else might be simplified much more
2860 2862 ms = mergemod.mergestate.read(self)
2861 2863 if (
2862 2864 fname in ms
2863 2865 and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
2864 2866 ):
2865 2867 fparent1, fparent2 = fparent2, nullid
2866 2868
2867 2869 # is the file changed?
2868 2870 text = fctx.data()
2869 2871 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2870 2872 changelist.append(fname)
2871 2873 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2872 2874 # are just the flags changed during merge?
2873 2875 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2874 2876 changelist.append(fname)
2875 2877
2876 2878 return fparent1
2877 2879
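# Illustration only, with hypothetical values: when a rename is committed
# through _filecommit above, the filelog entry carries copy metadata of
# roughly this shape, and fparent1 is set to nullid so readers know to
# consult it.
meta = {
    b'copy': b'foo',  # path the file was copied/renamed from
    b'copyrev': b'ab' * 20,  # hex filelog node of the copy source
}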
2878 2880 def checkcommitpatterns(self, wctx, match, status, fail):
2879 2881 """check for commit arguments that aren't committable"""
2880 2882 if match.isexact() or match.prefix():
2881 2883 matched = set(status.modified + status.added + status.removed)
2882 2884
2883 2885 for f in match.files():
2884 2886 f = self.dirstate.normalize(f)
2885 2887 if f == b'.' or f in matched or f in wctx.substate:
2886 2888 continue
2887 2889 if f in status.deleted:
2888 2890 fail(f, _(b'file not found!'))
2889 2891 # Is it a directory that exists or used to exist?
2890 2892 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2891 2893 d = f + b'/'
2892 2894 for mf in matched:
2893 2895 if mf.startswith(d):
2894 2896 break
2895 2897 else:
2896 2898 fail(f, _(b"no match under directory!"))
2897 2899 elif f not in self.dirstate:
2898 2900 fail(f, _(b"file not tracked!"))
2899 2901
2900 2902 @unfilteredmethod
2901 2903 def commit(
2902 2904 self,
2903 2905 text=b"",
2904 2906 user=None,
2905 2907 date=None,
2906 2908 match=None,
2907 2909 force=False,
2908 2910 editor=None,
2909 2911 extra=None,
2910 2912 ):
2911 2913 """Add a new revision to current repository.
2912 2914
2913 2915 Revision information is gathered from the working directory,
2914 2916 match can be used to filter the committed files. If editor is
2915 2917 supplied, it is called to get a commit message.
2916 2918 """
2917 2919 if extra is None:
2918 2920 extra = {}
2919 2921
2920 2922 def fail(f, msg):
2921 2923 raise error.Abort(b'%s: %s' % (f, msg))
2922 2924
2923 2925 if not match:
2924 2926 match = matchmod.always()
2925 2927
2926 2928 if not force:
2927 2929 match.bad = fail
2928 2930
2929 2931 # lock() for recent changelog (see issue4368)
2930 2932 with self.wlock(), self.lock():
2931 2933 wctx = self[None]
2932 2934 merge = len(wctx.parents()) > 1
2933 2935
2934 2936 if not force and merge and not match.always():
2935 2937 raise error.Abort(
2936 2938 _(
2937 2939 b'cannot partially commit a merge '
2938 2940 b'(do not specify files or patterns)'
2939 2941 )
2940 2942 )
2941 2943
2942 2944 status = self.status(match=match, clean=force)
2943 2945 if force:
2944 2946 status.modified.extend(
2945 2947 status.clean
2946 2948 ) # mq may commit clean files
2947 2949
2948 2950 # check subrepos
2949 2951 subs, commitsubs, newstate = subrepoutil.precommit(
2950 2952 self.ui, wctx, status, match, force=force
2951 2953 )
2952 2954
2953 2955 # make sure all explicit patterns are matched
2954 2956 if not force:
2955 2957 self.checkcommitpatterns(wctx, match, status, fail)
2956 2958
2957 2959 cctx = context.workingcommitctx(
2958 2960 self, status, text, user, date, extra
2959 2961 )
2960 2962
2961 2963 ms = mergemod.mergestate.read(self)
2962 2964 mergeutil.checkunresolved(ms)
2963 2965
2964 2966 # internal config: ui.allowemptycommit
2965 2967 allowemptycommit = (
2966 2968 wctx.branch() != wctx.p1().branch()
2967 2969 or extra.get(b'close')
2968 2970 or merge
2969 2971 or cctx.files()
2970 2972 or self.ui.configbool(b'ui', b'allowemptycommit')
2971 2973 )
2972 2974 if not allowemptycommit:
2973 2975 self.ui.debug(b'nothing to commit, clearing merge state\n')
2974 2976 ms.reset()
2975 2977 return None
2976 2978
2977 2979 if merge and cctx.deleted():
2978 2980 raise error.Abort(_(b"cannot commit merge with missing files"))
2979 2981
2980 2982 if editor:
2981 2983 cctx._text = editor(self, cctx, subs)
2982 2984 edited = text != cctx._text
2983 2985
2984 2986 # Save commit message in case this transaction gets rolled back
2985 2987 # (e.g. by a pretxncommit hook). Leave the content alone on
2986 2988 # the assumption that the user will use the same editor again.
2987 2989 msgfn = self.savecommitmessage(cctx._text)
2988 2990
2989 2991 # commit subs and write new state
2990 2992 if subs:
2991 2993 uipathfn = scmutil.getuipathfn(self)
2992 2994 for s in sorted(commitsubs):
2993 2995 sub = wctx.sub(s)
2994 2996 self.ui.status(
2995 2997 _(b'committing subrepository %s\n')
2996 2998 % uipathfn(subrepoutil.subrelpath(sub))
2997 2999 )
2998 3000 sr = sub.commit(cctx._text, user, date)
2999 3001 newstate[s] = (newstate[s][0], sr)
3000 3002 subrepoutil.writestate(self, newstate)
3001 3003
3002 3004 p1, p2 = self.dirstate.parents()
3003 3005 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3004 3006 try:
3005 3007 self.hook(
3006 3008 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3007 3009 )
3008 3010 with self.transaction(b'commit'):
3009 3011 ret = self.commitctx(cctx, True)
3010 3012 # update bookmarks, dirstate and mergestate
3011 3013 bookmarks.update(self, [p1, p2], ret)
3012 3014 cctx.markcommitted(ret)
3013 3015 ms.reset()
3014 3016 except: # re-raises
3015 3017 if edited:
3016 3018 self.ui.write(
3017 3019 _(b'note: commit message saved in %s\n') % msgfn
3018 3020 )
3019 3021 raise
3020 3022
3021 3023 def commithook(unused_success):
3022 3024 # hack for commands that use a temporary commit (e.g. histedit):
3023 3025 # the temporary commit may have been stripped before the hook runs
3024 3026 if self.changelog.hasnode(ret):
3025 3027 self.hook(
3026 3028 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3027 3029 )
3028 3030
3029 3031 self._afterlock(commithook)
3030 3032 return ret
3031 3033
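# Hedged usage sketch ('repo', the message and the user are hypothetical):
# commit() acquires wlock and lock itself and returns the new node, or None
# when there is nothing to commit.
def _example_commit(repo):
    node = repo.commit(text=b'fix something', user=b'Example <ex@example.com>')
    if node is None:
        pass  # empty commit was skipped and the merge state cleared
    return node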
3032 3034 @unfilteredmethod
3033 3035 def commitctx(self, ctx, error=False, origctx=None):
3034 3036 """Add a new revision to current repository.
3035 3037 Revision information is passed via the context argument.
3036 3038
3037 3039 ctx.files() should list all files involved in this commit, i.e.
3038 3040 modified/added/removed files. On merge, it may be wider than the
3039 3041 ctx.files() to be committed, since any file nodes derived directly
3040 3042 from p1 or p2 are excluded from the committed ctx.files().
3041 3043
3042 3044 origctx is for convert to work around the problem that bug
3043 3045 fixes to the files list in changesets change hashes. For
3044 3046 convert to be the identity, it can pass an origctx and this
3045 3047 function will use the same files list when it makes sense to
3046 3048 do so.
3047 3049 """
3048 3050
3049 3051 p1, p2 = ctx.p1(), ctx.p2()
3050 3052 user = ctx.user()
3051 3053
3052 3054 if self.filecopiesmode == b'changeset-sidedata':
3053 3055 writechangesetcopy = True
3054 3056 writefilecopymeta = True
3055 3057 writecopiesto = None
3056 3058 else:
3057 3059 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3058 3060 writefilecopymeta = writecopiesto != b'changeset-only'
3059 3061 writechangesetcopy = writecopiesto in (
3060 3062 b'changeset-only',
3061 3063 b'compatibility',
3062 3064 )
3063 3065 p1copies, p2copies = None, None
3064 3066 if writechangesetcopy:
3065 3067 p1copies = ctx.p1copies()
3066 3068 p2copies = ctx.p2copies()
3067 3069 filesadded, filesremoved = None, None
3068 3070 with self.lock(), self.transaction(b"commit") as tr:
3069 3071 trp = weakref.proxy(tr)
3070 3072
3071 3073 if ctx.manifestnode():
3072 3074 # reuse an existing manifest revision
3073 3075 self.ui.debug(b'reusing known manifest\n')
3074 3076 mn = ctx.manifestnode()
3075 3077 files = ctx.files()
3076 3078 if writechangesetcopy:
3077 3079 filesadded = ctx.filesadded()
3078 3080 filesremoved = ctx.filesremoved()
3079 3081 elif ctx.files():
3080 3082 m1ctx = p1.manifestctx()
3081 3083 m2ctx = p2.manifestctx()
3082 3084 mctx = m1ctx.copy()
3083 3085
3084 3086 m = mctx.read()
3085 3087 m1 = m1ctx.read()
3086 3088 m2 = m2ctx.read()
3087 3089
3088 3090 # check in files
3089 3091 added = []
3090 3092 changed = []
3091 3093 removed = list(ctx.removed())
3092 3094 linkrev = len(self)
3093 3095 self.ui.note(_(b"committing files:\n"))
3094 3096 uipathfn = scmutil.getuipathfn(self)
3095 3097 for f in sorted(ctx.modified() + ctx.added()):
3096 3098 self.ui.note(uipathfn(f) + b"\n")
3097 3099 try:
3098 3100 fctx = ctx[f]
3099 3101 if fctx is None:
3100 3102 removed.append(f)
3101 3103 else:
3102 3104 added.append(f)
3103 3105 m[f] = self._filecommit(
3104 3106 fctx,
3105 3107 m1,
3106 3108 m2,
3107 3109 linkrev,
3108 3110 trp,
3109 3111 changed,
3110 3112 writefilecopymeta,
3111 3113 )
3112 3114 m.setflag(f, fctx.flags())
3113 3115 except OSError:
3114 3116 self.ui.warn(
3115 3117 _(b"trouble committing %s!\n") % uipathfn(f)
3116 3118 )
3117 3119 raise
3118 3120 except IOError as inst:
3119 3121 errcode = getattr(inst, 'errno', errno.ENOENT)
3120 3122 if error or errcode and errcode != errno.ENOENT:
3121 3123 self.ui.warn(
3122 3124 _(b"trouble committing %s!\n") % uipathfn(f)
3123 3125 )
3124 3126 raise
3125 3127
3126 3128 # update manifest
3127 3129 removed = [f for f in removed if f in m1 or f in m2]
3128 3130 drop = sorted([f for f in removed if f in m])
3129 3131 for f in drop:
3130 3132 del m[f]
3131 3133 if p2.rev() != nullrev:
3132 3134
3133 3135 @util.cachefunc
3134 3136 def mas():
3135 3137 p1n = p1.node()
3136 3138 p2n = p2.node()
3137 3139 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3138 3140 if not cahs:
3139 3141 cahs = [nullrev]
3140 3142 return [self[r].manifest() for r in cahs]
3141 3143
3142 3144 def deletionfromparent(f):
3143 3145 # When a file is removed relative to p1 in a merge, this
3144 3146 # function determines whether the absence is due to a
3145 3147 # deletion from a parent, or whether the merge commit
3146 3148 # itself deletes the file. We decide this by doing a
3147 3149 # simplified three way merge of the manifest entry for
3148 3150 # the file. There are two ways we decide the merge
3149 3151 # itself didn't delete a file:
3150 3152 # - neither parent (nor the merge) contain the file
3151 3153 # - exactly one parent contains the file, and that
3152 3154 # parent has the same filelog entry as the merge
3153 3155 # ancestor (or all of them if there are two). In other
3154 3156 # words, that parent left the file unchanged while the
3155 3157 # other one deleted it.
3156 3158 # One way to think about this is that deleting a file is
3157 3159 # similar to emptying it, so the list of changed files
3158 3160 # should be similar either way. The computation
3159 3161 # described above is not done directly in _filecommit
3160 3162 # when creating the list of changed files; however,
3161 3163 # it does something very similar by comparing filelog
3162 3164 # nodes.
3163 3165 if f in m1:
3164 3166 return f not in m2 and all(
3165 3167 f in ma and ma.find(f) == m1.find(f)
3166 3168 for ma in mas()
3167 3169 )
3168 3170 elif f in m2:
3169 3171 return all(
3170 3172 f in ma and ma.find(f) == m2.find(f)
3171 3173 for ma in mas()
3172 3174 )
3173 3175 else:
3174 3176 return True
3175 3177
3176 3178 removed = [f for f in removed if not deletionfromparent(f)]
3177 3179
3178 3180 files = changed + removed
3179 3181 md = None
3180 3182 if not files:
3181 3183 # if no "files" actually changed in terms of the changelog,
3182 3184 # try hard to detect an unmodified manifest entry so that the
3183 3185 # exact same commit can be reproduced later by convert.
3184 3186 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3185 3187 if not files and md:
3186 3188 self.ui.debug(
3187 3189 b'not reusing manifest (no file change in '
3188 3190 b'changelog, but manifest differs)\n'
3189 3191 )
3190 3192 if files or md:
3191 3193 self.ui.note(_(b"committing manifest\n"))
3192 3194 # we're using narrowmatch here since it's already applied at
3193 3195 # other stages (such as dirstate.walk), so we're already
3194 3196 # ignoring things outside of narrowspec in most cases. The
3195 3197 # one case where we might have files outside the narrowspec
3196 3198 # at this point is merges, and we already error out in the
3197 3199 # case where the merge has files outside of the narrowspec,
3198 3200 # so this is safe.
3199 3201 mn = mctx.write(
3200 3202 trp,
3201 3203 linkrev,
3202 3204 p1.manifestnode(),
3203 3205 p2.manifestnode(),
3204 3206 added,
3205 3207 drop,
3206 3208 match=self.narrowmatch(),
3207 3209 )
3208 3210
3209 3211 if writechangesetcopy:
3210 3212 filesadded = [
3211 3213 f for f in changed if not (f in m1 or f in m2)
3212 3214 ]
3213 3215 filesremoved = removed
3214 3216 else:
3215 3217 self.ui.debug(
3216 3218 b'reusing manifest from p1 (listed files '
3217 3219 b'actually unchanged)\n'
3218 3220 )
3219 3221 mn = p1.manifestnode()
3220 3222 else:
3221 3223 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3222 3224 mn = p1.manifestnode()
3223 3225 files = []
3224 3226
3225 3227 if writecopiesto == b'changeset-only':
3226 3228 # If writing only to changeset extras, use None to indicate that
3227 3229 # no entry should be written. If writing to both, write an empty
3228 3230 # entry to prevent the reader from falling back to reading
3229 3231 # filelogs.
3230 3232 p1copies = p1copies or None
3231 3233 p2copies = p2copies or None
3232 3234 filesadded = filesadded or None
3233 3235 filesremoved = filesremoved or None
3234 3236
3235 3237 if origctx and origctx.manifestnode() == mn:
3236 3238 files = origctx.files()
3237 3239
3238 3240 # update changelog
3239 3241 self.ui.note(_(b"committing changelog\n"))
3240 3242 self.changelog.delayupdate(tr)
3241 3243 n = self.changelog.add(
3242 3244 mn,
3243 3245 files,
3244 3246 ctx.description(),
3245 3247 trp,
3246 3248 p1.node(),
3247 3249 p2.node(),
3248 3250 user,
3249 3251 ctx.date(),
3250 3252 ctx.extra().copy(),
3251 3253 p1copies,
3252 3254 p2copies,
3253 3255 filesadded,
3254 3256 filesremoved,
3255 3257 )
3256 3258 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3257 3259 self.hook(
3258 3260 b'pretxncommit',
3259 3261 throw=True,
3260 3262 node=hex(n),
3261 3263 parent1=xp1,
3262 3264 parent2=xp2,
3263 3265 )
3264 3266 # set the new commit in its proper phase
3265 3267 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3266 3268 if targetphase:
3267 3269 # retract boundary does not alter parent changesets.
3268 3270 # if a parent has a higher phase, the resulting phase will
3269 3271 # be compliant anyway
3270 3272 #
3271 3273 # if minimal phase was 0 we don't need to retract anything
3272 3274 phases.registernew(self, tr, targetphase, [n])
3273 3275 return n
3274 3276
3275 3277 @unfilteredmethod
3276 3278 def destroying(self):
3277 3279 '''Inform the repository that nodes are about to be destroyed.
3278 3280 Intended for use by strip and rollback, so there's a common
3279 3281 place for anything that has to be done before destroying history.
3280 3282
3281 3283 This is mostly useful for saving state that is in memory and waiting
3282 3284 to be flushed when the current lock is released. Because a call to
3283 3285 destroyed is imminent, the repo will be invalidated causing those
3284 3286 changes to stay in memory (waiting for the next unlock), or vanish
3285 3287 completely.
3286 3288 '''
3287 3289 # When using the same lock to commit and strip, the phasecache is left
3288 3290 # dirty after committing. Then when we strip, the repo is invalidated,
3289 3291 # causing those changes to disappear.
3290 3292 if '_phasecache' in vars(self):
3291 3293 self._phasecache.write()
3292 3294
3293 3295 @unfilteredmethod
3294 3296 def destroyed(self):
3295 3297 '''Inform the repository that nodes have been destroyed.
3296 3298 Intended for use by strip and rollback, so there's a common
3297 3299 place for anything that has to be done after destroying history.
3298 3300 '''
3299 3301 # When one tries to:
3300 3302 # 1) destroy nodes thus calling this method (e.g. strip)
3301 3303 # 2) use phasecache somewhere (e.g. commit)
3302 3304 #
3303 3305 # then 2) will fail because the phasecache contains nodes that were
3304 3306 # removed. We can either remove phasecache from the filecache,
3305 3307 # causing it to reload next time it is accessed, or simply filter
3306 3308 # the removed nodes now and write the updated cache.
3307 3309 self._phasecache.filterunknown(self)
3308 3310 self._phasecache.write()
3309 3311
3310 3312 # refresh all repository caches
3311 3313 self.updatecaches()
3312 3314
3313 3315 # Ensure the persistent tag cache is updated. Doing it now
3314 3316 # means that the tag cache only has to worry about destroyed
3315 3317 # heads immediately after a strip/rollback. That in turn
3316 3318 # guarantees that "cachetip == currenttip" (comparing both rev
3317 3319 # and node) always means no nodes have been added or destroyed.
3318 3320
3319 3321 # XXX this is suboptimal when qrefresh'ing: we strip the current
3320 3322 # head, refresh the tag cache, then immediately add a new head.
3321 3323 # But I think doing it this way is necessary for the "instant
3322 3324 # tag cache retrieval" case to work.
3323 3325 self.invalidate()
3324 3326
3325 3327 def status(
3326 3328 self,
3327 3329 node1=b'.',
3328 3330 node2=None,
3329 3331 match=None,
3330 3332 ignored=False,
3331 3333 clean=False,
3332 3334 unknown=False,
3333 3335 listsubrepos=False,
3334 3336 ):
3335 3337 '''a convenience method that calls node1.status(node2)'''
3336 3338 return self[node1].status(
3337 3339 node2, match, ignored, clean, unknown, listsubrepos
3338 3340 )
3339 3341
3340 3342 def addpostdsstatus(self, ps):
3341 3343 """Add a callback to run within the wlock, at the point at which status
3342 3344 fixups happen.
3343 3345
3344 3346 On status completion, callback(wctx, status) will be called with the
3345 3347 wlock held, unless the dirstate has changed from underneath or the wlock
3346 3348 couldn't be grabbed.
3347 3349
3348 3350 Callbacks should not capture and use a cached copy of the dirstate --
3349 3351 it might change in the meanwhile. Instead, they should access the
3350 3352 dirstate via wctx.repo().dirstate.
3351 3353
3352 3354 This list is emptied out after each status run -- extensions should
3353 3355 make sure they add to this list each time dirstate.status is called.
3354 3356 Extensions should also make sure they don't call this for statuses
3355 3357 that don't involve the dirstate.
3356 3358 """
3357 3359
3358 3360 # The list is located here for uniqueness reasons -- it is actually
3359 3361 # managed by the workingctx, but that isn't unique per-repo.
3360 3362 self._postdsstatus.append(ps)
3361 3363
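# Hedged sketch (names hypothetical) of using the hook described above. Per
# the docstring, the callback receives (wctx, status) under wlock, must go
# through wctx.repo().dirstate rather than a cached dirstate, and has to be
# re-registered before each status run because the list is cleared afterwards.
def _poststatus(wctx, status):
    dirstate = wctx.repo().dirstate  # never use a cached dirstate
    # inspect 'status' and perform any fixups here

def _status_with_callback(repo):
    repo.addpostdsstatus(_poststatus)
    return repo.status(unknown=True)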
3362 3364 def postdsstatus(self):
3363 3365 """Used by workingctx to get the list of post-dirstate-status hooks."""
3364 3366 return self._postdsstatus
3365 3367
3366 3368 def clearpostdsstatus(self):
3367 3369 """Used by workingctx to clear post-dirstate-status hooks."""
3368 3370 del self._postdsstatus[:]
3369 3371
3370 3372 def heads(self, start=None):
3371 3373 if start is None:
3372 3374 cl = self.changelog
3373 3375 headrevs = reversed(cl.headrevs())
3374 3376 return [cl.node(rev) for rev in headrevs]
3375 3377
3376 3378 heads = self.changelog.heads(start)
3377 3379 # sort the output in rev descending order
3378 3380 return sorted(heads, key=self.changelog.rev, reverse=True)
3379 3381
3380 3382 def branchheads(self, branch=None, start=None, closed=False):
3381 3383 '''return a (possibly filtered) list of heads for the given branch
3382 3384
3383 3385 Heads are returned in topological order, from newest to oldest.
3384 3386 If branch is None, use the dirstate branch.
3385 3387 If start is not None, return only heads reachable from start.
3386 3388 If closed is True, return heads that are marked as closed as well.
3387 3389 '''
3388 3390 if branch is None:
3389 3391 branch = self[None].branch()
3390 3392 branches = self.branchmap()
3391 3393 if not branches.hasbranch(branch):
3392 3394 return []
3393 3395 # the cache returns heads ordered lowest to highest
3394 3396 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3395 3397 if start is not None:
3396 3398 # filter out the heads that cannot be reached from startrev
3397 3399 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3398 3400 bheads = [h for h in bheads if h in fbheads]
3399 3401 return bheads
3400 3402
3401 3403 def branches(self, nodes):
3402 3404 if not nodes:
3403 3405 nodes = [self.changelog.tip()]
3404 3406 b = []
3405 3407 for n in nodes:
3406 3408 t = n
3407 3409 while True:
3408 3410 p = self.changelog.parents(n)
3409 3411 if p[1] != nullid or p[0] == nullid:
3410 3412 b.append((t, n, p[0], p[1]))
3411 3413 break
3412 3414 n = p[0]
3413 3415 return b
3414 3416
3415 3417 def between(self, pairs):
3416 3418 r = []
3417 3419
3418 3420 for top, bottom in pairs:
3419 3421 n, l, i = top, [], 0
3420 3422 f = 1
3421 3423
3422 3424 while n != bottom and n != nullid:
3423 3425 p = self.changelog.parents(n)[0]
3424 3426 if i == f:
3425 3427 l.append(n)
3426 3428 f = f * 2
3427 3429 n = p
3428 3430 i += 1
3429 3431
3430 3432 r.append(l)
3431 3433
3432 3434 return r
3433 3435
3434 3436 def checkpush(self, pushop):
3435 3437 """Extensions can override this function if additional checks have
3436 3438 to be performed before pushing, or call it if they override the push
3437 3439 command.
3438 3440 """
3439 3441
3440 3442 @unfilteredpropertycache
3441 3443 def prepushoutgoinghooks(self):
3442 3444 """Return util.hooks consists of a pushop with repo, remote, outgoing
3443 3445 methods, which are called before pushing changesets.
3444 3446 """
3445 3447 return util.hooks()
3446 3448
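# Hedged sketch (extension and hook names hypothetical): hooks registered on
# prepushoutgoinghooks are invoked with the pushop before changesets are
# pushed, so an extension can veto or validate the outgoing set.
def _prepushoutgoing(pushop):
    if not pushop.outgoing.missing:
        return  # nothing outgoing, nothing to validate

def reposetup(ui, repo):
    repo.prepushoutgoinghooks.add(b'myext', _prepushoutgoing)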
3447 3449 def pushkey(self, namespace, key, old, new):
3448 3450 try:
3449 3451 tr = self.currenttransaction()
3450 3452 hookargs = {}
3451 3453 if tr is not None:
3452 3454 hookargs.update(tr.hookargs)
3453 3455 hookargs = pycompat.strkwargs(hookargs)
3454 3456 hookargs['namespace'] = namespace
3455 3457 hookargs['key'] = key
3456 3458 hookargs['old'] = old
3457 3459 hookargs['new'] = new
3458 3460 self.hook(b'prepushkey', throw=True, **hookargs)
3459 3461 except error.HookAbort as exc:
3460 3462 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3461 3463 if exc.hint:
3462 3464 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3463 3465 return False
3464 3466 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3465 3467 ret = pushkey.push(self, namespace, key, old, new)
3466 3468
3467 3469 def runhook(unused_success):
3468 3470 self.hook(
3469 3471 b'pushkey',
3470 3472 namespace=namespace,
3471 3473 key=key,
3472 3474 old=old,
3473 3475 new=new,
3474 3476 ret=ret,
3475 3477 )
3476 3478
3477 3479 self._afterlock(runhook)
3478 3480 return ret
3479 3481
3480 3482 def listkeys(self, namespace):
3481 3483 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3482 3484 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3483 3485 values = pushkey.list(self, namespace)
3484 3486 self.hook(b'listkeys', namespace=namespace, values=values)
3485 3487 return values
3486 3488
3487 3489 def debugwireargs(self, one, two, three=None, four=None, five=None):
3488 3490 '''used to test argument passing over the wire'''
3489 3491 return b"%s %s %s %s %s" % (
3490 3492 one,
3491 3493 two,
3492 3494 pycompat.bytestr(three),
3493 3495 pycompat.bytestr(four),
3494 3496 pycompat.bytestr(five),
3495 3497 )
3496 3498
3497 3499 def savecommitmessage(self, text):
3498 3500 fp = self.vfs(b'last-message.txt', b'wb')
3499 3501 try:
3500 3502 fp.write(text)
3501 3503 finally:
3502 3504 fp.close()
3503 3505 return self.pathto(fp.name[len(self.root) + 1 :])
3504 3506
3505 3507
3506 3508 # used to avoid circular references so destructors work
3507 3509 def aftertrans(files):
3508 3510 renamefiles = [tuple(t) for t in files]
3509 3511
3510 3512 def a():
3511 3513 for vfs, src, dest in renamefiles:
3512 3514 # if src and dest refer to the same file, vfs.rename is a no-op,
3513 3515 # leaving both src and dest on disk. delete dest to make sure
3514 3516 # the rename couldn't be such a no-op.
3515 3517 vfs.tryunlink(dest)
3516 3518 try:
3517 3519 vfs.rename(src, dest)
3518 3520 except OSError: # journal file does not yet exist
3519 3521 pass
3520 3522
3521 3523 return a
3522 3524
3523 3525
3524 3526 def undoname(fn):
3525 3527 base, name = os.path.split(fn)
3526 3528 assert name.startswith(b'journal')
3527 3529 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3528 3530
3529 3531
3530 3532 def instance(ui, path, create, intents=None, createopts=None):
3531 3533 localpath = util.urllocalpath(path)
3532 3534 if create:
3533 3535 createrepository(ui, localpath, createopts=createopts)
3534 3536
3535 3537 return makelocalrepository(ui, localpath, intents=intents)
3536 3538
3537 3539
3538 3540 def islocal(path):
3539 3541 return True
3540 3542
3541 3543
3542 3544 def defaultcreateopts(ui, createopts=None):
3543 3545 """Populate the default creation options for a repository.
3544 3546
3545 3547 A dictionary of explicitly requested creation options can be passed
3546 3548 in. Missing keys will be populated.
3547 3549 """
3548 3550 createopts = dict(createopts or {})
3549 3551
3550 3552 if b'backend' not in createopts:
3551 3553 # experimental config: storage.new-repo-backend
3552 3554 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3553 3555
3554 3556 return createopts
3555 3557
3556 3558
3557 3559 def newreporequirements(ui, createopts):
3558 3560 """Determine the set of requirements for a new local repository.
3559 3561
3560 3562 Extensions can wrap this function to specify custom requirements for
3561 3563 new repositories.
3562 3564 """
3563 3565 # If the repo is being created from a shared repository, we copy
3564 3566 # its requirements.
3565 3567 if b'sharedrepo' in createopts:
3566 3568 requirements = set(createopts[b'sharedrepo'].requirements)
3567 3569 if createopts.get(b'sharedrelative'):
3568 3570 requirements.add(b'relshared')
3569 3571 else:
3570 3572 requirements.add(b'shared')
3571 3573
3572 3574 return requirements
3573 3575
3574 3576 if b'backend' not in createopts:
3575 3577 raise error.ProgrammingError(
3576 3578 b'backend key not present in createopts; '
3577 3579 b'was defaultcreateopts() called?'
3578 3580 )
3579 3581
3580 3582 if createopts[b'backend'] != b'revlogv1':
3581 3583 raise error.Abort(
3582 3584 _(
3583 3585 b'unable to determine repository requirements for '
3584 3586 b'storage backend: %s'
3585 3587 )
3586 3588 % createopts[b'backend']
3587 3589 )
3588 3590
3589 3591 requirements = {b'revlogv1'}
3590 3592 if ui.configbool(b'format', b'usestore'):
3591 3593 requirements.add(b'store')
3592 3594 if ui.configbool(b'format', b'usefncache'):
3593 3595 requirements.add(b'fncache')
3594 3596 if ui.configbool(b'format', b'dotencode'):
3595 3597 requirements.add(b'dotencode')
3596 3598
3597 3599 compengines = ui.configlist(b'format', b'revlog-compression')
3598 3600 for compengine in compengines:
3599 3601 if compengine in util.compengines:
3600 3602 break
3601 3603 else:
3602 3604 raise error.Abort(
3603 3605 _(
3604 3606 b'compression engines %s defined by '
3605 3607 b'format.revlog-compression not available'
3606 3608 )
3607 3609 % b', '.join(b'"%s"' % e for e in compengines),
3608 3610 hint=_(
3609 3611 b'run "hg debuginstall" to list available '
3610 3612 b'compression engines'
3611 3613 ),
3612 3614 )
3613 3615
3614 3616 # zlib is the historical default and doesn't need an explicit requirement.
3615 3617 if compengine == b'zstd':
3616 3618 requirements.add(b'revlog-compression-zstd')
3617 3619 elif compengine != b'zlib':
3618 3620 requirements.add(b'exp-compression-%s' % compengine)
3619 3621
3620 3622 if scmutil.gdinitconfig(ui):
3621 3623 requirements.add(b'generaldelta')
3622 3624 if ui.configbool(b'format', b'sparse-revlog'):
3623 3625 requirements.add(SPARSEREVLOG_REQUIREMENT)
3624 3626
3625 3627 # experimental config: format.exp-use-side-data
3626 3628 if ui.configbool(b'format', b'exp-use-side-data'):
3627 3629 requirements.add(SIDEDATA_REQUIREMENT)
3628 3630 # experimental config: format.exp-use-copies-side-data-changeset
3629 3631 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3630 3632 requirements.add(SIDEDATA_REQUIREMENT)
3631 3633 requirements.add(COPIESSDC_REQUIREMENT)
3632 3634 if ui.configbool(b'experimental', b'treemanifest'):
3633 3635 requirements.add(b'treemanifest')
3634 3636
3635 3637 revlogv2 = ui.config(b'experimental', b'revlogv2')
3636 3638 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3637 3639 requirements.remove(b'revlogv1')
3638 3640 # generaldelta is implied by revlogv2.
3639 3641 requirements.discard(b'generaldelta')
3640 3642 requirements.add(REVLOGV2_REQUIREMENT)
3641 3643 # experimental config: format.internal-phase
3642 3644 if ui.configbool(b'format', b'internal-phase'):
3643 3645 requirements.add(b'internal-phase')
3644 3646
3645 3647 if createopts.get(b'narrowfiles'):
3646 3648 requirements.add(repository.NARROW_REQUIREMENT)
3647 3649
3648 3650 if createopts.get(b'lfs'):
3649 3651 requirements.add(b'lfs')
3650 3652
3651 3653 if ui.configbool(b'format', b'bookmarks-in-store'):
3652 3654 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3653 3655
3654 3656 return requirements
3655 3657
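# Hedged sketch of an extension wrapping newreporequirements() above to add
# its own requirement to newly created repositories; the requirement name is
# hypothetical.
from mercurial import extensions, localrepo

def _newreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    requirements.add(b'exp-myext-requirement')  # hypothetical requirement
    return requirements

def extsetup(ui):
    extensions.wrapfunction(
        localrepo, 'newreporequirements', _newreporequirements
    )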
3656 3658
3657 3659 def filterknowncreateopts(ui, createopts):
3658 3660 """Filters a dict of repo creation options against options that are known.
3659 3661
3660 3662 Receives a dict of repo creation options and returns a dict of those
3661 3663 options that we don't know how to handle.
3662 3664
3663 3665 This function is called as part of repository creation. If the
3664 3666 returned dict contains any items, repository creation will not
3665 3667 be allowed, as it means there was a request to create a repository
3666 3668 with options not recognized by loaded code.
3667 3669
3668 3670 Extensions can wrap this function to filter out creation options
3669 3671 they know how to handle.
3670 3672 """
3671 3673 known = {
3672 3674 b'backend',
3673 3675 b'lfs',
3674 3676 b'narrowfiles',
3675 3677 b'sharedrepo',
3676 3678 b'sharedrelative',
3677 3679 b'shareditems',
3678 3680 b'shallowfilestore',
3679 3681 }
3680 3682
3681 3683 return {k: v for k, v in createopts.items() if k not in known}
3682 3684
3683 3685
3684 3686 def createrepository(ui, path, createopts=None):
3685 3687 """Create a new repository in a vfs.
3686 3688
3687 3689 ``path`` path to the new repo's working directory.
3688 3690 ``createopts`` options for the new repository.
3689 3691
3690 3692 The following keys for ``createopts`` are recognized:
3691 3693
3692 3694 backend
3693 3695 The storage backend to use.
3694 3696 lfs
3695 3697 Repository will be created with ``lfs`` requirement. The lfs extension
3696 3698 will automatically be loaded when the repository is accessed.
3697 3699 narrowfiles
3698 3700 Set up repository to support narrow file storage.
3699 3701 sharedrepo
3700 3702 Repository object from which storage should be shared.
3701 3703 sharedrelative
3702 3704 Boolean indicating if the path to the shared repo should be
3703 3705 stored as relative. By default, the pointer to the "parent" repo
3704 3706 is stored as an absolute path.
3705 3707 shareditems
3706 3708 Set of items to share to the new repository (in addition to storage).
3707 3709 shallowfilestore
3708 3710 Indicates that storage for files should be shallow (not all ancestor
3709 3711 revisions are known).
3710 3712 """
3711 3713 createopts = defaultcreateopts(ui, createopts=createopts)
3712 3714
3713 3715 unknownopts = filterknowncreateopts(ui, createopts)
3714 3716
3715 3717 if not isinstance(unknownopts, dict):
3716 3718 raise error.ProgrammingError(
3717 3719 b'filterknowncreateopts() did not return a dict'
3718 3720 )
3719 3721
3720 3722 if unknownopts:
3721 3723 raise error.Abort(
3722 3724 _(
3723 3725 b'unable to create repository because of unknown '
3724 3726 b'creation option: %s'
3725 3727 )
3726 3728 % b', '.join(sorted(unknownopts)),
3727 3729 hint=_(b'is a required extension not loaded?'),
3728 3730 )
3729 3731
3730 3732 requirements = newreporequirements(ui, createopts=createopts)
3731 3733
3732 3734 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3733 3735
3734 3736 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3735 3737 if hgvfs.exists():
3736 3738 raise error.RepoError(_(b'repository %s already exists') % path)
3737 3739
3738 3740 if b'sharedrepo' in createopts:
3739 3741 sharedpath = createopts[b'sharedrepo'].sharedpath
3740 3742
3741 3743 if createopts.get(b'sharedrelative'):
3742 3744 try:
3743 3745 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3744 3746 except (IOError, ValueError) as e:
3745 3747 # ValueError is raised on Windows if the drive letters differ
3746 3748 # on each path.
3747 3749 raise error.Abort(
3748 3750 _(b'cannot calculate relative path'),
3749 3751 hint=stringutil.forcebytestr(e),
3750 3752 )
3751 3753
3752 3754 if not wdirvfs.exists():
3753 3755 wdirvfs.makedirs()
3754 3756
3755 3757 hgvfs.makedir(notindexed=True)
3756 3758 if b'sharedrepo' not in createopts:
3757 3759 hgvfs.mkdir(b'cache')
3758 3760 hgvfs.mkdir(b'wcache')
3759 3761
3760 3762 if b'store' in requirements and b'sharedrepo' not in createopts:
3761 3763 hgvfs.mkdir(b'store')
3762 3764
3763 3765 # We create an invalid changelog outside the store so very old
3764 3766 # Mercurial versions (which didn't know about the requirements
3765 3767 # file) encounter an error on reading the changelog. This
3766 3768 # effectively locks out old clients and prevents them from
3767 3769 # mucking with a repo in an unknown format.
3768 3770 #
3769 3771 # The revlog header has version 2, which won't be recognized by
3770 3772 # such old clients.
3771 3773 hgvfs.append(
3772 3774 b'00changelog.i',
3773 3775 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3774 3776 b'layout',
3775 3777 )
3776 3778
3777 3779 scmutil.writerequires(hgvfs, requirements)
3778 3780
3779 3781 # Write out file telling readers where to find the shared store.
3780 3782 if b'sharedrepo' in createopts:
3781 3783 hgvfs.write(b'sharedpath', sharedpath)
3782 3784
3783 3785 if createopts.get(b'shareditems'):
3784 3786 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3785 3787 hgvfs.write(b'shared', shared)
3786 3788
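# Hedged sketch of creating and then opening a repository through this API;
# the path and the createopts are hypothetical. instance() above follows the
# same createrepository()/makelocalrepository() sequence.
from mercurial import hg, localrepo, ui as uimod

u = uimod.ui.load()
localrepo.createrepository(u, b'/tmp/newrepo', createopts={b'lfs': True})
repo = hg.repository(u, b'/tmp/newrepo')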
3787 3789
3788 3790 def poisonrepository(repo):
3789 3791 """Poison a repository instance so it can no longer be used."""
3790 3792 # Perform any cleanup on the instance.
3791 3793 repo.close()
3792 3794
3793 3795 # Our strategy is to replace the type of the object with one that
3794 3796 # makes all attribute lookups raise an error.
3795 3797 #
3796 3798 # But we have to allow the close() method because some constructors
3797 3799 # of repos call close() on repo references.
3798 3800 class poisonedrepository(object):
3799 3801 def __getattribute__(self, item):
3800 3802 if item == 'close':
3801 3803 return object.__getattribute__(self, item)
3802 3804
3803 3805 raise error.ProgrammingError(
3804 3806 b'repo instances should not be used after unshare'
3805 3807 )
3806 3808
3807 3809 def close(self):
3808 3810 pass
3809 3811
3810 3812 # We may have a repoview, which intercepts __setattr__. So be sure
3811 3813 # we operate at the lowest level possible.
3812 3814 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,294 +1,299 b''
1 1 #require no-reposimplestore
2 2
3 3 $ hg clone http://localhost:$HGPORT/ copy
4 4 abort: * (glob)
5 5 [255]
6 6 $ test -d copy
7 7 [1]
8 8
9 9 This server doesn't do range requests so it's basically only good for
10 10 one pull
11 11
12 12 $ "$PYTHON" "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
13 13 > --logfile server.log
14 14 $ cat dumb.pid >> $DAEMON_PIDS
15 15 $ hg init remote
16 16 $ cd remote
17 17 $ echo foo > bar
18 18 $ echo c2 > '.dotfile with spaces'
19 19 $ hg add
20 20 adding .dotfile with spaces
21 21 adding bar
22 22 $ hg commit -m"test"
23 23 $ hg tip
24 24 changeset: 0:02770d679fb8
25 25 tag: tip
26 26 user: test
27 27 date: Thu Jan 01 00:00:00 1970 +0000
28 28 summary: test
29 29
30 30 $ cd ..
31 31 $ hg clone static-http://localhost:$HGPORT/remote local
32 32 requesting all changes
33 33 adding changesets
34 34 adding manifests
35 35 adding file changes
36 36 added 1 changesets with 2 changes to 2 files
37 37 new changesets 02770d679fb8
38 38 updating to branch default
39 39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 40 $ cd local
41 41 $ hg verify
42 42 checking changesets
43 43 checking manifests
44 44 crosschecking files in changesets and manifests
45 45 checking files
46 46 checked 1 changesets with 2 changes to 2 files
47 47 $ cat bar
48 48 foo
49 49 $ cd ../remote
50 50 $ echo baz > quux
51 51 $ hg commit -A -mtest2
52 52 adding quux
53 53
54 54 check for HTTP opener failures when cachefile does not exist
55 55
56 56 $ rm .hg/cache/*
57 57 $ cd ../local
58 58 $ cat >> .hg/hgrc <<EOF
59 59 > [hooks]
60 60 > changegroup = sh -c "printenv.py --line changegroup"
61 61 > EOF
62 62 $ hg pull
63 63 pulling from static-http://localhost:$HGPORT/remote
64 64 searching for changes
65 65 adding changesets
66 66 adding manifests
67 67 adding file changes
68 68 added 1 changesets with 1 changes to 1 files
69 69 new changesets 4ac2e3648604
70 70 changegroup hook: HG_HOOKNAME=changegroup
71 71 HG_HOOKTYPE=changegroup
72 72 HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432
73 73 HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432
74 74 HG_SOURCE=pull
75 75 HG_TXNID=TXN:$ID$
76 76 HG_TXNNAME=pull
77 77 http://localhost:$HGPORT/remote
78 78 HG_URL=http://localhost:$HGPORT/remote
79 79
80 80 (run 'hg update' to get a working copy)
81 81
82 82 trying to push
83 83
84 84 $ hg update
85 85 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
86 86 $ echo more foo >> bar
87 87 $ hg commit -m"test"
88 88 $ hg push
89 89 pushing to static-http://localhost:$HGPORT/remote
90 90 abort: destination does not support push
91 91 [255]
92 92
93 93 trying clone -r
94 94
95 95 $ cd ..
96 96 $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
97 97 abort: unknown revision 'doesnotexist'!
98 98 [255]
99 99 $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
100 100 adding changesets
101 101 adding manifests
102 102 adding file changes
103 103 added 1 changesets with 2 changes to 2 files
104 104 new changesets 02770d679fb8
105 105 updating to branch default
106 106 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 107
108 108 test with "/" URI (issue747) and subrepo
109 109
110 110 $ hg init
111 111 $ hg init sub
112 112 $ touch sub/test
113 113 $ hg -R sub commit -A -m "test"
114 114 adding test
115 115 $ hg -R sub tag not-empty
116 116 $ echo sub=sub > .hgsub
117 117 $ echo a > a
118 118 $ hg add a .hgsub
119 119 $ hg -q ci -ma
120 120 $ hg clone static-http://localhost:$HGPORT/ local2
121 121 requesting all changes
122 122 adding changesets
123 123 adding manifests
124 124 adding file changes
125 125 added 1 changesets with 3 changes to 3 files
126 126 new changesets a9ebfbe8e587
127 127 updating to branch default
128 128 cloning subrepo sub from static-http://localhost:$HGPORT/sub
129 129 requesting all changes
130 130 adding changesets
131 131 adding manifests
132 132 adding file changes
133 133 added 2 changesets with 2 changes to 2 files
134 134 new changesets be090ea66256:322ea90975df
135 135 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
136 136 $ cd local2
137 137 $ hg verify
138 138 checking changesets
139 139 checking manifests
140 140 crosschecking files in changesets and manifests
141 141 checking files
142 142 checked 1 changesets with 3 changes to 3 files
143 143 checking subrepo links
144 144 $ cat a
145 145 a
146 146 $ hg paths
147 147 default = static-http://localhost:$HGPORT/
148 148
149 149 test with empty repo (issue965)
150 150
151 151 $ cd ..
152 152 $ hg init remotempty
153 153 $ hg clone static-http://localhost:$HGPORT/remotempty local3
154 154 no changes found
155 155 updating to branch default
156 156 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 157 $ cd local3
158 158 $ hg verify
159 159 checking changesets
160 160 checking manifests
161 161 crosschecking files in changesets and manifests
162 162 checking files
163 163 checked 0 changesets with 0 changes to 0 files
164 164 $ hg paths
165 165 default = static-http://localhost:$HGPORT/remotempty
166 166
167 167 test with non-repo
168 168
169 169 $ cd ..
170 170 $ mkdir notarepo
171 171 $ hg clone static-http://localhost:$HGPORT/notarepo local3
172 172 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
173 173 [255]
174 174
175 175 Clone with tags and branches works
176 176
177 177 $ hg init remote-with-names
178 178 $ cd remote-with-names
179 179 $ echo 0 > foo
180 180 $ hg -q commit -A -m initial
181 181 $ echo 1 > foo
182 182 $ hg commit -m 'commit 1'
183 183 $ hg -q up 0
184 184 $ hg branch mybranch
185 185 marked working directory as branch mybranch
186 186 (branches are permanent and global, did you want a bookmark?)
187 187 $ echo 2 > foo
188 188 $ hg commit -m 'commit 2 (mybranch)'
189 189 $ hg tag -r 1 'default-tag'
190 190 $ hg tag -r 2 'branch-tag'
191 191
192 192 $ cd ..
193 193
194 194 $ hg clone static-http://localhost:$HGPORT/remote-with-names local-with-names
195 195 requesting all changes
196 196 adding changesets
197 197 adding manifests
198 198 adding file changes
199 199 added 5 changesets with 5 changes to 2 files (+1 heads)
200 200 new changesets 68986213bd44:0c325bd2b5a7
201 201 updating to branch default
202 202 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 203
204 204 Clone a specific branch works
205 205
206 206 $ hg clone -r mybranch static-http://localhost:$HGPORT/remote-with-names local-with-names-branch
207 207 adding changesets
208 208 adding manifests
209 209 adding file changes
210 210 added 4 changesets with 4 changes to 2 files
211 211 new changesets 68986213bd44:0c325bd2b5a7
212 212 updating to branch mybranch
213 213 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
214 214
215 215 Clone a specific tag works
216 216
217 217 $ hg clone -r default-tag static-http://localhost:$HGPORT/remote-with-names local-with-names-tag
218 218 adding changesets
219 219 adding manifests
220 220 adding file changes
221 221 added 2 changesets with 2 changes to 1 files
222 222 new changesets 68986213bd44:4ee3fcef1c80
223 223 updating to branch default
224 224 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
225 225
226 226 $ killdaemons.py
227 227
228 228 List of files accessed over HTTP:
229 229
230 230 $ cat server.log | sed -n -e 's|.*GET \(/[^ ]*\).*|\1|p' | sort -u
231 231 /.hg/bookmarks
232 232 /.hg/bookmarks.current
233 233 /.hg/cache/hgtagsfnodes1
234 234 /.hg/cache/rbc-names-v1
235 235 /.hg/cache/rbc-revs-v1
236 /.hg/dirstate
236 237 /.hg/requires
237 238 /.hg/store/00changelog.i
238 239 /.hg/store/00manifest.i
239 240 /.hg/store/data/%7E2ehgsub.i (no-py37 !)
240 241 /.hg/store/data/%7E2ehgsubstate.i (no-py37 !)
241 242 /.hg/store/data/a.i
242 243 /.hg/store/data/~2ehgsub.i (py37 !)
243 244 /.hg/store/data/~2ehgsubstate.i (py37 !)
244 245 /notarepo/.hg/00changelog.i
245 246 /notarepo/.hg/requires
246 247 /remote-with-names/.hg/bookmarks
247 248 /remote-with-names/.hg/bookmarks.current
248 249 /remote-with-names/.hg/cache/branch2-served
249 250 /remote-with-names/.hg/cache/hgtagsfnodes1
250 251 /remote-with-names/.hg/cache/rbc-names-v1
251 252 /remote-with-names/.hg/cache/rbc-revs-v1
252 253 /remote-with-names/.hg/cache/tags2-served
254 /remote-with-names/.hg/dirstate
253 255 /remote-with-names/.hg/localtags
254 256 /remote-with-names/.hg/requires
255 257 /remote-with-names/.hg/store/00changelog.i
256 258 /remote-with-names/.hg/store/00manifest.i
257 259 /remote-with-names/.hg/store/data/%7E2ehgtags.i (no-py37 !)
258 260 /remote-with-names/.hg/store/data/foo.i
259 261 /remote-with-names/.hg/store/data/~2ehgtags.i (py37 !)
260 262 /remote/.hg/bookmarks
261 263 /remote/.hg/bookmarks.current
262 264 /remote/.hg/cache/branch2-base
263 265 /remote/.hg/cache/branch2-immutable
264 266 /remote/.hg/cache/branch2-served
265 267 /remote/.hg/cache/hgtagsfnodes1
266 268 /remote/.hg/cache/rbc-names-v1
267 269 /remote/.hg/cache/rbc-revs-v1
268 270 /remote/.hg/cache/tags2-served
271 /remote/.hg/dirstate
269 272 /remote/.hg/localtags
270 273 /remote/.hg/requires
271 274 /remote/.hg/store/00changelog.i
272 275 /remote/.hg/store/00manifest.i
273 276 /remote/.hg/store/data/%7E2edotfile%20with%20spaces.i (no-py37 !)
274 277 /remote/.hg/store/data/%7E2ehgtags.i (no-py37 !)
275 278 /remote/.hg/store/data/bar.i
276 279 /remote/.hg/store/data/quux.i
277 280 /remote/.hg/store/data/~2edotfile%20with%20spaces.i (py37 !)
278 281 /remote/.hg/store/data/~2ehgtags.i (py37 !)
279 282 /remotempty/.hg/bookmarks
280 283 /remotempty/.hg/bookmarks.current
284 /remotempty/.hg/dirstate
281 285 /remotempty/.hg/requires
282 286 /remotempty/.hg/store/00changelog.i
283 287 /remotempty/.hg/store/00manifest.i
284 288 /sub/.hg/bookmarks
285 289 /sub/.hg/bookmarks.current
286 290 /sub/.hg/cache/hgtagsfnodes1
287 291 /sub/.hg/cache/rbc-names-v1
288 292 /sub/.hg/cache/rbc-revs-v1
293 /sub/.hg/dirstate
289 294 /sub/.hg/requires
290 295 /sub/.hg/store/00changelog.i
291 296 /sub/.hg/store/00manifest.i
292 297 /sub/.hg/store/data/%7E2ehgtags.i (no-py37 !)
293 298 /sub/.hg/store/data/test.i
294 299 /sub/.hg/store/data/~2ehgtags.i (py37 !)