##// END OF EJS Templates
dirstate: group return logic and clarify each function in flagfunc...
Raphaël Gomès -
r49103:0d6a099b default
parent child Browse files
Show More
@@ -1,1528 +1,1531 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .dirstateutils import (
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
43 43 parsers = policy.importmod('parsers')
44 44 rustmod = policy.importrust('dirstate')
45 45
46 46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 47
48 48 propertycache = util.propertycache
49 49 filecache = scmutil.filecache
50 50 _rangemask = dirstatemap.rangemask
51 51
52 52 DirstateItem = dirstatemap.DirstateItem
53 53
54 54
class repocache(filecache):
    """A filecache specialization for files living under .hg/."""

    def join(self, obj, fname):
        # resolve relative to the repository's .hg/ opener
        return obj._opener.join(fname)
61 61
class rootcache(filecache):
    """A filecache specialization for files living in the repository root."""

    def join(self, obj, fname):
        # resolve relative to the working-directory root
        return obj._join(fname)
68 68
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # create a throwaway file so we read the clock the filesystem itself
    # uses for mtimes, rather than the system clock
    fd, fname = vfs.mkstemp()
    try:
        return timestamp.mtime_of(os.fstat(fd))
    finally:
        os.close(fd)
        vfs.unlink(fname)
77 77
78 78
def requires_parents_change(func):
    """Decorator: restrict *func* to parentchange() contexts.

    Raises error.ProgrammingError when the wrapped method is invoked
    while no parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
88 88
89 89
def requires_no_parents_change(func):
    """Decorator: forbid calling *func* inside a parentchange() context.

    Raises error.ProgrammingError when a parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
99 99
100 100
101 101 @interfaceutil.implementer(intdirstate.idirstate)
102 102 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = timestamp.zero()
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback, fired when the working dir parents change
        self._plchangecallbacks = {}
        # parents as they were before the current round of changes, used to
        # decide whether the parent-change callbacks must fire on write
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
142 142
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely reading the property forces the underlying map to load
        self._pl
149 149
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Contexts may be nested; ``_parentwriters`` counts the depth.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
166 166
167 167 def pendingparentchange(self):
168 168 """Returns true if the dirstate is in the middle of a set of changes
169 169 that modify the dirstate parent.
170 170 """
171 171 return self._parentwriters > 0
172 172
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # the assignment replaces this propertycache slot, so the map is
        # only constructed on first access
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
184 184
185 185 @property
186 186 def _sparsematcher(self):
187 187 """The matcher for the sparse checkout.
188 188
189 189 The working directory may not include every file from a manifest. The
190 190 matcher obtained by this property will match a path if it is to be
191 191 included in the working directory.
192 192 """
193 193 # TODO there is potential to cache this property. For now, the matcher
194 194 # is resolved on every access. (But the called function does use a
195 195 # cache to keep the lookup fast.)
196 196 return self._sparsematchfn()
197 197
198 198 @repocache(b'branch')
199 199 def _branch(self):
200 200 try:
201 201 return self._opener.read(b"branch").strip() or b"default"
202 202 except IOError as inst:
203 203 if inst.errno != errno.ENOENT:
204 204 raise
205 205 return b"default"
206 206
    @property
    def _pl(self):
        # working-directory parents as recorded in the dirstate map
        return self._map.parents()
210 210
    def hasdir(self, d):
        """True if ``d`` is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
213 213
214 214 @rootcache(b'.hgignore')
215 215 def _ignore(self):
216 216 files = self._ignorefiles()
217 217 if not files:
218 218 return matchmod.never()
219 219
220 220 pats = [b'include:%s' % f for f in files]
221 221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222 222
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the OS
        # separator differs (ui.slash configuration)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226 226
    @propertycache
    def _checklink(self):
        # True if the filesystem at the repository root supports symlinks
        return util.checklink(self._root)
230 230
    @propertycache
    def _checkexec(self):
        # True if the filesystem at the repository root supports the
        # executable permission bit
        return bool(util.checkexec(self._root))
234 234
    @propertycache
    def _checkcase(self):
        # True on case-folding (case-insensitive) filesystems, probed on .hg
        return not util.fscasesensitive(self._join(b'.hg'))
238 238
    def _join(self, f):
        """Return the absolute filesystem path for tracked file ``f``."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
243 243
244 244 def flagfunc(self, buildfallback):
245 if self._checklink and self._checkexec:
246
247 def f(x):
248 try:
249 st = os.lstat(self._join(x))
250 if util.statislink(st):
251 return b'l'
252 if util.statisexec(st):
253 return b'x'
254 except OSError:
255 pass
256 return b''
245 if not (self._checklink and self._checkexec):
246 fallback = buildfallback()
257 247
258 return f
259
260 fallback = buildfallback()
261 if self._checklink:
262
263 def f(x):
264 if os.path.islink(self._join(x)):
248 def check_both(x):
249 """This platform supports symlinks and exec permissions"""
250 try:
251 st = os.lstat(self._join(x))
252 if util.statislink(st):
265 253 return b'l'
266 entry = self.get_entry(x)
267 if entry.has_fallback_exec:
268 if entry.fallback_exec:
269 return b'x'
270 elif b'x' in fallback(x):
254 if util.statisexec(st):
271 255 return b'x'
272 return b''
256 except OSError:
257 pass
258 return b''
259
260 def check_link(x):
261 """This platform only supports symlinks"""
262 if os.path.islink(self._join(x)):
263 return b'l'
264 entry = self.get_entry(x)
265 if entry.has_fallback_exec:
266 if entry.fallback_exec:
267 return b'x'
268 elif b'x' in fallback(x):
269 return b'x'
270 return b''
273 271
274 return f
275 if self._checkexec:
276
277 def f(x):
278 if b'l' in fallback(x):
272 def check_exec(x):
273 """This platform only supports exec permissions"""
274 if b'l' in fallback(x):
275 return b'l'
276 entry = self.get_entry(x)
277 if entry.has_fallback_symlink:
278 if entry.fallback_symlink:
279 279 return b'l'
280 entry = self.get_entry(x)
281 if entry.has_fallback_symlink:
282 if entry.fallback_symlink:
283 return b'l'
284 if util.isexec(self._join(x)):
285 return b'x'
286 return b''
280 if util.isexec(self._join(x)):
281 return b'x'
282 return b''
287 283
288 return f
289 else:
284 def check_fallback(x):
285 """This platform supports neither symlinks nor exec permissions, so
286 check the fallback in the dirstate if it exists, otherwise figure it
287 out the more expensive way from the parents."""
288 entry = self.get_entry(x)
289 if entry.has_fallback_symlink:
290 if entry.fallback_symlink:
291 return b'l'
292 if entry.has_fallback_exec:
293 if entry.fallback_exec:
294 return b'x'
295 elif entry.has_fallback_symlink:
296 return b''
297 return fallback(x)
290 298
291 def f(x):
292 entry = self.get_entry(x)
293 if entry.has_fallback_symlink:
294 if entry.fallback_symlink:
295 return b'l'
296 if entry.has_fallback_exec:
297 if entry.fallback_exec:
298 return b'x'
299 elif entry.has_fallback_symlink:
300 return b''
301 return fallback(x)
302
303 return f
299 if self._checklink and self._checkexec:
300 return check_both
301 elif self._checklink:
302 return check_link
303 elif self._checkexec:
304 return check_exec
305 else:
306 return check_fallback
304 307
    @propertycache
    def _cwd(self):
        # cached eagerly in __init__ so it survives the working directory
        # disappearing later
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
312 315
313 316 def getcwd(self):
314 317 """Return the path from which a canonical path is calculated.
315 318
316 319 This path should be used to resolve file patterns or to convert
317 320 canonical paths back to file paths for display. It shouldn't be
318 321 used to get real file paths. Use vfs functions instead.
319 322 """
320 323 cwd = self._cwd
321 324 if cwd == self._root:
322 325 return b''
323 326 # self._root ends with a path separator if self._root is '/' or 'C:\'
324 327 rootsep = self._root
325 328 if not util.endswithsep(rootsep):
326 329 rootsep += pycompat.ossep
327 330 if cwd.startswith(rootsep):
328 331 return cwd[len(rootsep) :]
329 332 else:
330 333 # we're outside the repo. return an absolute path.
331 334 return cwd
332 335
333 336 def pathto(self, f, cwd=None):
334 337 if cwd is None:
335 338 cwd = self.getcwd()
336 339 path = util.pathto(self._root, cwd, f)
337 340 if self._slash:
338 341 return util.pconvert(path)
339 342 return path
340 343
341 344 def __getitem__(self, key):
342 345 """Return the current state of key (a filename) in the dirstate.
343 346
344 347 States are:
345 348 n normal
346 349 m needs merging
347 350 r marked for removal
348 351 a marked for addition
349 352 ? not tracked
350 353
351 354 XXX The "state" is a bit obscure to be in the "public" API. we should
352 355 consider migrating all user of this to going through the dirstate entry
353 356 instead.
354 357 """
355 358 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
356 359 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
357 360 entry = self._map.get(key)
358 361 if entry is not None:
359 362 return entry.state
360 363 return b'?'
361 364
362 365 def get_entry(self, path):
363 366 """return a DirstateItem for the associated path"""
364 367 entry = self._map.get(path)
365 368 if entry is None:
366 369 return DirstateItem()
367 370 return entry
368 371
    def __contains__(self, key):
        """True if ``key`` has an entry in the dirstate map."""
        return key in self._map
371 374
    def __iter__(self):
        """Iterate over tracked filenames in sorted order."""
        return iter(sorted(self._map))
374 377
    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias
    iteritems = items
379 382
380 383 def parents(self):
381 384 return [self._validate(p) for p in self._pl]
382 385
    def p1(self):
        """Return the validated first working-directory parent."""
        return self._validate(self._pl[0])
385 388
    def p2(self):
        """Return the validated second working-directory parent."""
        return self._validate(self._pl[1])
388 391
    @property
    def in_merge(self):
        """True if a merge is in progress (second parent is not null)"""
        return self._pl[1] != self._nodeconstants.nullid
393 396
    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
396 399
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents so the parent-change
            # callbacks can be notified on write
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
422 425
    def setbranch(self, branch):
        """Persist ``branch`` (given in local encoding) to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
438 441
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop cached properties so they get recomputed lazily
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None
453 456
454 457 def copy(self, source, dest):
455 458 """Mark dest as a copy of source. Unmark dest if source is None."""
456 459 if source == dest:
457 460 return
458 461 self._dirty = True
459 462 if source is not None:
460 463 self._map.copymap[dest] = source
461 464 else:
462 465 self._map.copymap.pop(dest, None)
463 466
464 467 def copied(self, file):
465 468 return self._map.copymap.get(file, None)
466 469
    def copies(self):
        """Return the mapping of copy destinations to their sources."""
        return self._map.copymap
469 472
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked: guard against clashes with tracked dirs/files
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)
484 487
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            # only mark dirty when something actually changed
            self._dirty = True
        return ret
498 501
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean

        ``parentfiledata`` is an optional (mode, size, mtime) triple; when
        absent it is read from the filesystem via lstat.
        """
        self._dirty = True
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
515 518
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        # in-memory state now differs from what is on disk
        self._dirty = True
        self._map.set_possibly_dirty(filename)
521 524
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        # only gather filesystem data when the file stays tracked on both
        # sides
        parentfiledata = None
        if wc_tracked and p1_tracked:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
574 577
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        # filesystem data is only meaningful for a clean, non-merge file
        # tracked in both the working copy and p1
        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        if need_parent_file_data and parentfiledata is None:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
626 629
    def _check_new_tracked_filename(self, filename):
        """Abort when ``filename`` clashes with an already-tracked directory,
        or when one of its parent directories is tracked as a file."""
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
642 645
    def _get_filedata(self, filename):
        """return a (mode, size, mtime) triple for ``filename`` from lstat"""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = timestamp.mtime_of(s)
        return (mode, size, mtime)
650 653
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Compute the case-folded spelling of ``path`` from disk (or from
        its parent directory when the path is missing), caching the result
        in ``storemap`` keyed by the normalized form."""
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
676 679
677 680 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
678 681 normed = util.normcase(path)
679 682 folded = self._map.filefoldmap.get(normed, None)
680 683 if folded is None:
681 684 if isknown:
682 685 folded = path
683 686 else:
684 687 folded = self._discoverpath(
685 688 path, normed, ignoremissing, exists, self._map.filefoldmap
686 689 )
687 690 return folded
688 691
689 692 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
690 693 normed = util.normcase(path)
691 694 folded = self._map.filefoldmap.get(normed, None)
692 695 if folded is None:
693 696 folded = self._map.dirfoldmap.get(normed, None)
694 697 if folded is None:
695 698 if isknown:
696 699 folded = path
697 700 else:
698 701 # store discovered result in dirfoldmap so that future
699 702 # normalizefile calls don't start matching directories
700 703 folded = self._discoverpath(
701 704 path, normed, ignoremissing, exists, self._map.dirfoldmap
702 705 )
703 706 return folded
704 707
705 708 def normalize(self, path, isknown=False, ignoremissing=False):
706 709 """
707 710 normalize the case of a pathname when on a casefolding filesystem
708 711
709 712 isknown specifies whether the filename came from walking the
710 713 disk, to avoid extra filesystem access.
711 714
712 715 If ignoremissing is True, missing path are returned
713 716 unchanged. Otherwise, we try harder to normalize possibly
714 717 existing path components.
715 718
716 719 The normalized case is determined based on the following precedence:
717 720
718 721 - version of name already stored in the dirstate
719 722 - version of name stored on disk
720 723 - version provided via command arguments
721 724 """
722 725
723 726 if self._checkcase:
724 727 return self._normalize(path, isknown, ignoremissing)
725 728 return path
726 729
    def clear(self):
        """Empty the dirstate map and mark the dirstate dirty."""
        self._map.clear()
        self._lastnormaltime = timestamp.zero()
        self._dirty = True
731 734
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate so it describes ``parent`` for ``allfiles``.

        When ``changedfiles`` is given, only those entries are adjusted:
        files also present in ``allfiles`` are looked up again, the others
        are dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
773 776
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        (see dirstatemap for how the identity is established)
        """
        return self._map.identity
781 784
    def write(self, tr):
        """Write in-memory changes to disk, possibly delayed through ``tr``."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtime start to be ambiguous
            now = _getfsnow(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        # no transaction: write immediately and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
808 811
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # later registrations with the same category replace earlier ones
        self._plchangecallbacks[category] = callback
819 822
    def _writedirstate(self, tr, st, now=None):
        """Serialize the dirstate map into open file ``st``.

        Fires registered parent-change callbacks first, and optionally
        sleeps (debug.dirstate.delaywrite) so ambiguous mtimes settle.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file as the
            # filesystem's notion of 'now'
            now = timestamp.mtime_of(util.fstat(st))

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    # trust our estimate that the end is near now
                    now = timestamp.timestamp((end, 0))
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
856 859
857 860 def _dirignore(self, f):
858 861 if self._ignore(f):
859 862 return True
860 863 for p in pathutil.finddirs(f):
861 864 if self._ignore(p):
862 865 return True
863 866 return False
864 867
865 868 def _ignorefiles(self):
866 869 files = []
867 870 if os.path.exists(self._join(b'.hgignore')):
868 871 files.append(self._join(b'.hgignore'))
869 872 for name, path in self._ui.configitems(b"ui"):
870 873 if name == b'ignore' or name.startswith(b'ignore.'):
871 874 # we need to use os.path.join here rather than self._join
872 875 # because path is arbitrary and user-specified
873 876 files.append(os.path.join(self._rootdir, util.expandpath(path)))
874 877 return files
875 878
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore pattern matching
        ``f``, scanning ignore files breadth-first through subincludes.

        Returns (None, -1, b"") when no pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file instead of matching directly
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
897 900
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an untrackable file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used attributes to locals for speed in the loop
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # files and subrepos are both sorted: walk them in lockstep and drop
        # any explicit file that lives inside a subrepo (the subrepo handles
        # its own files)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: subrepos and .hg map to None so later walks skip them
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    # device/fifo/socket: report as unsupported
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1032 1035
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind frequently-used attributes to locals for speed in the hot loops
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable/vanished directory: report and keep walking
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels installed by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1220 1223
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Run status through the Rust fast path and return (lookup, status).

        ``lookup`` lists files whose content must be read to decide whether
        they changed; ``status`` is a scmutil.status object.  The caller
        handles rustmod.FallbackError when the Rust implementation cannot
        service the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust call may have flagged the dirstate as changed; remember
        # that so it gets written back out later
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid ignore-pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: an ignore pattern file that could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1299 1302
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's request flags; the original names are reused
        # below as result accumulators
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # fall through to the pure-Python implementation below
                pass

        def noop(f):
            pass

        # bind list.append (or a no-op for unrequested categories) to short
        # local names for speed in the classification loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # not in the dirstate: the file is either ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                # tracked file is missing from disk
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # size/mode mismatch (or a recorded copy) means modified;
                # mtime alone only makes the file "unsure"
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    ladd(fn)
                elif timestamp.mtime_of(st) == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1428 1431
1429 1432 def matches(self, match):
1430 1433 """
1431 1434 return files in the dirstate (in whatever state) filtered by match
1432 1435 """
1433 1436 dmap = self._map
1434 1437 if rustmod is not None:
1435 1438 dmap = self._map._map
1436 1439
1437 1440 if match.always():
1438 1441 return dmap.keys()
1439 1442 files = match.files()
1440 1443 if match.isexact():
1441 1444 # fast path -- filter the other way around, since typically files is
1442 1445 # much smaller than dmap
1443 1446 return [f for f in files if f in dmap]
1444 1447 if match.prefix() and all(fn in dmap for fn in files):
1445 1448 # fast path -- all the values are known to be files, so just return
1446 1449 # that
1447 1450 return list(files)
1448 1451 return [f for f in dmap if match(f)]
1449 1452
1450 1453 def _actualfilename(self, tr):
1451 1454 if tr:
1452 1455 return self._pendingfilename
1453 1456 else:
1454 1457 return self._filename
1455 1458
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # drop any stale backup before (re)creating it
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1494 1497
1495 1498 def restorebackup(self, tr, backupname):
1496 1499 '''Restore dirstate by backup file'''
1497 1500 # this "invalidate()" prevents "wlock.release()" from writing
1498 1501 # changes of dirstate out after restoring from backup file
1499 1502 self.invalidate()
1500 1503 filename = self._actualfilename(tr)
1501 1504 o = self._opener
1502 1505 if util.samefile(o.join(backupname), o.join(filename)):
1503 1506 o.unlink(backupname)
1504 1507 else:
1505 1508 o.rename(backupname, filename, checkambig=True)
1506 1509
1507 1510 def clearbackup(self, tr, backupname):
1508 1511 '''Clear backup file'''
1509 1512 self._opener.unlink(backupname)
1510 1513
1511 1514 def verify(self, m1, m2):
1512 1515 """check the dirstate content again the parent manifest and yield errors"""
1513 1516 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1514 1517 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1515 1518 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1516 1519 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1517 1520 for f, entry in self.items():
1518 1521 state = entry.state
1519 1522 if state in b"nr" and f not in m1:
1520 1523 yield (missing_from_p1, f, state)
1521 1524 if state in b"a" and f in m1:
1522 1525 yield (unexpected_in_p1, f, state)
1523 1526 if state in b"m" and f not in m1 and f not in m2:
1524 1527 yield (missing_from_ps, f, state)
1525 1528 for f in m1:
1526 1529 state = self.get_entry(f).state
1527 1530 if state not in b"nrm":
1528 1531 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now