dirstate: drop the deprecated `drop` method...
marmoute
r48726:51cd60c0 default
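This commit removes the deprecated `drop` method from `dirstate`. Judging from the deprecation warnings deleted in the diff below, callers are expected to migrate to `set_untracked()` when running outside of an update/merge context, and to `update_file()` inside a `parentchange()` context. A minimal migration sketch (not part of the commit; `repo` and the helper names are illustrative):

# Illustrative sketch only -- `repo` is assumed to be a localrepository object
# already available to the caller; the helper names are hypothetical.

def forget_file(repo, f):
    """Replacement for `dirstate.drop(f)` outside of update/merge code."""
    with repo.wlock():
        repo.dirstate.set_untracked(f)

def drop_during_rewrite(repo, f):
    """Replacement for `dirstate.drop(f)` inside update/merge code."""
    with repo.wlock(), repo.dirstate.parentchange():
        repo.dirstate.update_file(f, wc_tracked=False, p1_tracked=False)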
@@ -1,72 +1,71
1 1 # dirstatenonnormalcheck.py - extension to check the consistency of the
2 2 # dirstate's non-normal map
3 3 #
4 4 # For most operations on dirstate, this extension checks that the nonnormalset
5 5 # contains the right entries.
6 6 # It compares the nonnormal file to a nonnormalset built from the map of all
7 7 # the files in the dirstate to check that they contain the same files.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 from mercurial import (
12 12 dirstate,
13 13 extensions,
14 14 pycompat,
15 15 )
16 16
17 17
18 18 def nonnormalentries(dmap):
19 19 """Compute nonnormal entries from dirstate's dmap"""
20 20 res = set()
21 21 for f, e in dmap.iteritems():
22 22 if e.state != b'n' or e.mtime == -1:
23 23 res.add(f)
24 24 return res
25 25
26 26
27 27 def checkconsistency(ui, orig, dmap, _nonnormalset, label):
28 28 """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
29 29 nonnormalcomputedmap = nonnormalentries(dmap)
30 30 if _nonnormalset != nonnormalcomputedmap:
31 31 b_orig = pycompat.sysbytes(repr(orig))
32 32 ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
33 33 ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
34 34 b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
35 35 ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
36 36 b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
37 37 ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')
38 38
39 39
40 40 def _checkdirstate(orig, self, *args, **kwargs):
41 41 """Check nonnormal set consistency before and after the call to orig"""
42 42 checkconsistency(
43 43 self._ui, orig, self._map, self._map.nonnormalset, b"before"
44 44 )
45 45 r = orig(self, *args, **kwargs)
46 46 checkconsistency(
47 47 self._ui, orig, self._map, self._map.nonnormalset, b"after"
48 48 )
49 49 return r
50 50
51 51
52 52 def extsetup(ui):
53 53 """Wrap functions modifying dirstate to check nonnormalset consistency"""
54 54 dirstatecl = dirstate.dirstate
55 55 devel = ui.configbool(b'devel', b'all-warnings')
56 56 paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
57 57 if devel:
58 58 extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
59 59 if paranoid:
60 60 # We don't do all these checks when paranoid is disabled, as it would
61 61 # make the extension run very slowly on large repos
62 62 extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
63 extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
64 63 extensions.wrapfunction(dirstatecl, 'set_tracked', _checkdirstate)
65 64 extensions.wrapfunction(dirstatecl, 'set_untracked', _checkdirstate)
66 65 extensions.wrapfunction(
67 66 dirstatecl, 'set_possibly_dirty', _checkdirstate
68 67 )
69 68 extensions.wrapfunction(
70 69 dirstatecl, 'update_file_p1', _checkdirstate
71 70 )
72 71 extensions.wrapfunction(dirstatecl, 'update_file', _checkdirstate)
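The checks wired up above are opt-in: the `_writedirstate` wrapper only runs with `devel.all-warnings` enabled, and the per-operation wrappers additionally require `experimental.nonnormalparanoidcheck`, since they would make the extension very slow on large repositories. A small sketch of enabling both options programmatically, for example from a test helper (`ui.setconfig()` is the regular Mercurial API; the function name is illustrative):

def enable_paranoid_checks(ui):
    # equivalent to setting the two options in an hgrc file
    ui.setconfig(b'devel', b'all-warnings', b'yes', b'dirstatenonnormalcheck')
    ui.setconfig(
        b'experimental',
        b'nonnormalparanoidcheck',
        b'yes',
        b'dirstatenonnormalcheck',
    )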
@@ -1,1660 +1,1642
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
51 51 class repocache(filecache):
52 52 """filecache for files in .hg/"""
53 53
54 54 def join(self, obj, fname):
55 55 return obj._opener.join(fname)
56 56
57 57
58 58 class rootcache(filecache):
59 59 """filecache for files in the repository root"""
60 60
61 61 def join(self, obj, fname):
62 62 return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
75 75 def requires_parents_change(func):
76 76 def wrap(self, *args, **kwargs):
77 77 if not self.pendingparentchange():
78 78 msg = 'calling `%s` outside of a parentchange context'
79 79 msg %= func.__name__
80 80 raise error.ProgrammingError(msg)
81 81 return func(self, *args, **kwargs)
82 82
83 83 return wrap
84 84
85 85
86 86 def requires_no_parents_change(func):
87 87 def wrap(self, *args, **kwargs):
88 88 if self.pendingparentchange():
89 89 msg = 'calling `%s` inside of a parentchange context'
90 90 msg %= func.__name__
91 91 raise error.ProgrammingError(msg)
92 92 return func(self, *args, **kwargs)
93 93
94 94 return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
99 99 def __init__(
100 100 self,
101 101 opener,
102 102 ui,
103 103 root,
104 104 validate,
105 105 sparsematchfn,
106 106 nodeconstants,
107 107 use_dirstate_v2,
108 108 ):
109 109 """Create a new dirstate object.
110 110
111 111 opener is an open()-like callable that can be used to open the
112 112 dirstate file; root is the root of the directory tracked by
113 113 the dirstate.
114 114 """
115 115 self._use_dirstate_v2 = use_dirstate_v2
116 116 self._nodeconstants = nodeconstants
117 117 self._opener = opener
118 118 self._validate = validate
119 119 self._root = root
120 120 self._sparsematchfn = sparsematchfn
121 121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 122 # UNC path pointing to root share (issue4557)
123 123 self._rootdir = pathutil.normasprefix(root)
124 124 self._dirty = False
125 125 self._lastnormaltime = 0
126 126 self._ui = ui
127 127 self._filecache = {}
128 128 self._parentwriters = 0
129 129 self._filename = b'dirstate'
130 130 self._pendingfilename = b'%s.pending' % self._filename
131 131 self._plchangecallbacks = {}
132 132 self._origpl = None
133 133 self._updatedfiles = set()
134 134 self._mapcls = dirstatemap.dirstatemap
135 135 # Access and cache cwd early, so we don't access it for the first time
136 136 # after a working-copy update caused it to not exist (accessing it then
137 137 # raises an exception).
138 138 self._cwd
139 139
140 140 def prefetch_parents(self):
141 141 """make sure the parents are loaded
142 142
143 143 Used to avoid a race condition.
144 144 """
145 145 self._pl
146 146
147 147 @contextlib.contextmanager
148 148 def parentchange(self):
149 149 """Context manager for handling dirstate parents.
150 150
151 151 If an exception occurs in the scope of the context manager,
152 152 the incoherent dirstate won't be written when wlock is
153 153 released.
154 154 """
155 155 self._parentwriters += 1
156 156 yield
157 157 # Typically we want the "undo" step of a context manager in a
158 158 # finally block so it happens even when an exception
159 159 # occurs. In this case, however, we only want to decrement
160 160 # parentwriters if the code in the with statement exits
161 161 # normally, so we don't have a try/finally here on purpose.
162 162 self._parentwriters -= 1
163 163
164 164 def pendingparentchange(self):
165 165 """Returns true if the dirstate is in the middle of a set of changes
166 166 that modify the dirstate parent.
167 167 """
168 168 return self._parentwriters > 0
169 169
170 170 @propertycache
171 171 def _map(self):
172 172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 173 self._map = self._mapcls(
174 174 self._ui,
175 175 self._opener,
176 176 self._root,
177 177 self._nodeconstants,
178 178 self._use_dirstate_v2,
179 179 )
180 180 return self._map
181 181
182 182 @property
183 183 def _sparsematcher(self):
184 184 """The matcher for the sparse checkout.
185 185
186 186 The working directory may not include every file from a manifest. The
187 187 matcher obtained by this property will match a path if it is to be
188 188 included in the working directory.
189 189 """
190 190 # TODO there is potential to cache this property. For now, the matcher
191 191 # is resolved on every access. (But the called function does use a
192 192 # cache to keep the lookup fast.)
193 193 return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
204 204 @property
205 205 def _pl(self):
206 206 return self._map.parents()
207 207
208 208 def hasdir(self, d):
209 209 return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
220 220 @propertycache
221 221 def _slash(self):
222 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
224 224 @propertycache
225 225 def _checklink(self):
226 226 return util.checklink(self._root)
227 227
228 228 @propertycache
229 229 def _checkexec(self):
230 230 return bool(util.checkexec(self._root))
231 231
232 232 @propertycache
233 233 def _checkcase(self):
234 234 return not util.fscasesensitive(self._join(b'.hg'))
235 235
236 236 def _join(self, f):
237 237 # much faster than os.path.join()
238 238 # it's safe because f is always a relative path
239 239 return self._rootdir + f
240 240
241 241 def flagfunc(self, buildfallback):
242 242 if self._checklink and self._checkexec:
243 243
244 244 def f(x):
245 245 try:
246 246 st = os.lstat(self._join(x))
247 247 if util.statislink(st):
248 248 return b'l'
249 249 if util.statisexec(st):
250 250 return b'x'
251 251 except OSError:
252 252 pass
253 253 return b''
254 254
255 255 return f
256 256
257 257 fallback = buildfallback()
258 258 if self._checklink:
259 259
260 260 def f(x):
261 261 if os.path.islink(self._join(x)):
262 262 return b'l'
263 263 if b'x' in fallback(x):
264 264 return b'x'
265 265 return b''
266 266
267 267 return f
268 268 if self._checkexec:
269 269
270 270 def f(x):
271 271 if b'l' in fallback(x):
272 272 return b'l'
273 273 if util.isexec(self._join(x)):
274 274 return b'x'
275 275 return b''
276 276
277 277 return f
278 278 else:
279 279 return fallback
280 280
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. We should
328 328 consider migrating all users of this to go through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
336 336 def __contains__(self, key):
337 337 return key in self._map
338 338
339 339 def __iter__(self):
340 340 return iter(sorted(self._map))
341 341
342 342 def items(self):
343 343 return pycompat.iteritems(self._map)
344 344
345 345 iteritems = items
346 346
347 347 def directories(self):
348 348 return self._map.directories()
349 349
350 350 def parents(self):
351 351 return [self._validate(p) for p in self._pl]
352 352
353 353 def p1(self):
354 354 return self._validate(self._pl[0])
355 355
356 356 def p2(self):
357 357 return self._validate(self._pl[1])
358 358
359 359 @property
360 360 def in_merge(self):
361 361 """True if a merge is in progress"""
362 362 return self._pl[1] != self._nodeconstants.nullid
363 363
364 364 def branch(self):
365 365 return encoding.tolocal(self._branch)
366 366
367 367 def setparents(self, p1, p2=None):
368 368 """Set dirstate parents to p1 and p2.
369 369
370 370 When moving from two parents to one, "merged" entries are
371 371 adjusted to normal, and previous copy records are discarded and
372 372 returned by the call.
373 373
374 374 See localrepo.setparents()
375 375 """
376 376 if p2 is None:
377 377 p2 = self._nodeconstants.nullid
378 378 if self._parentwriters == 0:
379 379 raise ValueError(
380 380 b"cannot set dirstate parent outside of "
381 381 b"dirstate.parentchange context manager"
382 382 )
383 383
384 384 self._dirty = True
385 385 oldp2 = self._pl[1]
386 386 if self._origpl is None:
387 387 self._origpl = self._pl
388 388 self._map.setparents(p1, p2)
389 389 copies = {}
390 390 if (
391 391 oldp2 != self._nodeconstants.nullid
392 392 and p2 == self._nodeconstants.nullid
393 393 ):
394 394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395 395
396 396 for f in candidatefiles:
397 397 s = self._map.get(f)
398 398 if s is None:
399 399 continue
400 400
401 401 # Discard "merged" markers when moving away from a merge state
402 402 if s.merged:
403 403 source = self._map.copymap.get(f)
404 404 if source:
405 405 copies[f] = source
406 406 self._normallookup(f)
407 407 # Also fix up otherparent markers
408 408 elif s.from_p2:
409 409 source = self._map.copymap.get(f)
410 410 if source:
411 411 copies[f] = source
412 412 self._add(f)
413 413 return copies
414 414
415 415 def setbranch(self, branch):
416 416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 418 try:
419 419 f.write(self._branch + b'\n')
420 420 f.close()
421 421
422 422 # make sure filecache has the correct stat info for _branch after
423 423 # replacing the underlying file
424 424 ce = self._filecache[b'_branch']
425 425 if ce:
426 426 ce.refresh()
427 427 except: # re-raises
428 428 f.discard()
429 429 raise
430 430
431 431 def invalidate(self):
432 432 """Causes the next access to reread the dirstate.
433 433
434 434 This is different from localrepo.invalidatedirstate() because it always
435 435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 436 check whether the dirstate has changed before rereading it."""
437 437
438 438 for a in ("_map", "_branch", "_ignore"):
439 439 if a in self.__dict__:
440 440 delattr(self, a)
441 441 self._lastnormaltime = 0
442 442 self._dirty = False
443 443 self._updatedfiles.clear()
444 444 self._parentwriters = 0
445 445 self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
459 459 def copied(self, file):
460 460 return self._map.copymap.get(file, None)
461 461
462 462 def copies(self):
463 463 return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of the "update/merge" case. For
470 470 example, by a command like `hg add X`.
471 471
472 472 return True if the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self._normallookup(filename)
480 480 return True
481 481 # XXX This is probably overkill for most cases, but we need this to
482 482 # fully replace the `normallookup` call with the `set_tracked` one.
483 483 # Consider smoothing this in the future.
484 484 self.set_possibly_dirty(filename)
485 485 return False
486 486
487 487 @requires_no_parents_change
488 488 def set_untracked(self, filename):
489 489 """a "public" method for generic code to mark a file as untracked
490 490
491 491 This function is to be called outside of the "update/merge" case. For
492 492 example, by a command like `hg remove X`.
493 493
494 494 return True if the file was previously tracked, False otherwise.
495 495 """
496 496 entry = self._map.get(filename)
497 497 if entry is None:
498 498 return False
499 499 elif entry.added:
500 500 self._drop(filename)
501 501 return True
502 502 else:
503 503 self._dirty = True
504 504 self._updatedfiles.add(filename)
505 505 self._map.set_untracked(filename)
506 506 return True
507 507
508 508 @requires_no_parents_change
509 509 def set_clean(self, filename, parentfiledata=None):
510 510 """record that the current state of the file on disk is known to be clean"""
511 511 self._dirty = True
512 512 self._updatedfiles.add(filename)
513 513 self._normal(filename, parentfiledata=parentfiledata)
514 514
515 515 @requires_no_parents_change
516 516 def set_possibly_dirty(self, filename):
517 517 """record that the current state of the file on disk is unknown"""
518 518 self._dirty = True
519 519 self._updatedfiles.add(filename)
520 520 self._map.set_possibly_dirty(filename)
521 521
522 522 @requires_parents_change
523 523 def update_file_p1(
524 524 self,
525 525 filename,
526 526 p1_tracked,
527 527 ):
528 528 """Set a file as tracked in the parent (or not)
529 529
530 530 This is to be called when adjusting the dirstate to a new parent after a history
531 531 rewriting operation.
532 532
533 533 It should not be called during a merge (p2 != nullid), and only within
534 534 a `with dirstate.parentchange():` context.
535 535 """
536 536 if self.in_merge:
537 537 msg = b'update_file_reference should not be called when merging'
538 538 raise error.ProgrammingError(msg)
539 539 entry = self._map.get(filename)
540 540 if entry is None:
541 541 wc_tracked = False
542 542 else:
543 543 wc_tracked = entry.tracked
544 544 possibly_dirty = False
545 545 if p1_tracked and wc_tracked:
546 546 # the underlying reference might have changed, we will have to
547 547 # check it.
548 548 possibly_dirty = True
549 549 elif not (p1_tracked or wc_tracked):
550 550 # the file is no longer relevant to anyone
551 551 self._drop(filename)
552 552 elif (not p1_tracked) and wc_tracked:
553 553 if entry is not None and entry.added:
554 554 return # avoid dropping copy information (maybe?)
555 555 elif p1_tracked and not wc_tracked:
556 556 pass
557 557 else:
558 558 assert False, 'unreachable'
559 559
560 560 # this means we are doing a call for a file whose data we do not really
561 561 # care about (e.g. added or removed); however, this should be a minor
562 562 # overhead compared to the overall update process calling this.
563 563 parentfiledata = None
564 564 if wc_tracked:
565 565 parentfiledata = self._get_filedata(filename)
566 566
567 567 self._updatedfiles.add(filename)
568 568 self._map.reset_state(
569 569 filename,
570 570 wc_tracked,
571 571 p1_tracked,
572 572 possibly_dirty=possibly_dirty,
573 573 parentfiledata=parentfiledata,
574 574 )
575 575 if (
576 576 parentfiledata is not None
577 577 and parentfiledata[2] > self._lastnormaltime
578 578 ):
579 579 # Remember the most recent modification timeslot for status(),
580 580 # to make sure we won't miss future size-preserving file content
581 581 # modifications that happen within the same timeslot.
582 582 self._lastnormaltime = parentfiledata[2]
583 583
584 584 @requires_parents_change
585 585 def update_file(
586 586 self,
587 587 filename,
588 588 wc_tracked,
589 589 p1_tracked,
590 590 p2_tracked=False,
591 591 merged=False,
592 592 clean_p1=False,
593 593 clean_p2=False,
594 594 possibly_dirty=False,
595 595 parentfiledata=None,
596 596 ):
597 597 """update the information about a file in the dirstate
598 598
599 599 This is to be called when the dirstate's parent changes, to keep track
600 600 of the file's situation with regard to the working copy and its parent.
601 601
602 602 This function must be called within a `dirstate.parentchange` context.
603 603
604 604 note: the API is at an early stage and we might need to adjust it
605 605 depending on what information ends up being relevant and useful to
606 606 other processing.
607 607 """
608 608 if merged and (clean_p1 or clean_p2):
609 609 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
610 610 raise error.ProgrammingError(msg)
611 611
612 612 # note: I do not think we need to double-check name clashes here since we
613 613 # are in an update/merge case that should already have taken care of
614 614 # this. The test agrees.
615 615
616 616 self._dirty = True
617 617 self._updatedfiles.add(filename)
618 618
619 619 need_parent_file_data = (
620 620 not (possibly_dirty or clean_p2 or merged)
621 621 and wc_tracked
622 622 and p1_tracked
623 623 )
624 624
625 625 # this means we are doing a call for a file whose data we do not really
626 626 # care about (e.g. added or removed); however, this should be a minor
627 627 # overhead compared to the overall update process calling this.
628 628 if need_parent_file_data:
629 629 if parentfiledata is None:
630 630 parentfiledata = self._get_filedata(filename)
631 631 mtime = parentfiledata[2]
632 632
633 633 if mtime > self._lastnormaltime:
634 634 # Remember the most recent modification timeslot for
635 635 # status(), to make sure we won't miss future
636 636 # size-preserving file content modifications that happen
637 637 # within the same timeslot.
638 638 self._lastnormaltime = mtime
639 639
640 640 self._map.reset_state(
641 641 filename,
642 642 wc_tracked,
643 643 p1_tracked,
644 644 p2_tracked=p2_tracked,
645 645 merged=merged,
646 646 clean_p1=clean_p1,
647 647 clean_p2=clean_p2,
648 648 possibly_dirty=possibly_dirty,
649 649 parentfiledata=parentfiledata,
650 650 )
651 651 if (
652 652 parentfiledata is not None
653 653 and parentfiledata[2] > self._lastnormaltime
654 654 ):
655 655 # Remember the most recent modification timeslot for status(),
656 656 # to make sure we won't miss future size-preserving file content
657 657 # modifications that happen within the same timeslot.
658 658 self._lastnormaltime = parentfiledata[2]
659 659
660 660 def _addpath(
661 661 self,
662 662 f,
663 663 mode=0,
664 664 size=None,
665 665 mtime=None,
666 666 added=False,
667 667 merged=False,
668 668 from_p2=False,
669 669 possibly_dirty=False,
670 670 ):
671 671 entry = self._map.get(f)
672 672 if added or entry is not None and entry.removed:
673 673 scmutil.checkfilename(f)
674 674 if self._map.hastrackeddir(f):
675 675 msg = _(b'directory %r already in dirstate')
676 676 msg %= pycompat.bytestr(f)
677 677 raise error.Abort(msg)
678 678 # shadows
679 679 for d in pathutil.finddirs(f):
680 680 if self._map.hastrackeddir(d):
681 681 break
682 682 entry = self._map.get(d)
683 683 if entry is not None and not entry.removed:
684 684 msg = _(b'file %r in dirstate clashes with %r')
685 685 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
686 686 raise error.Abort(msg)
687 687 self._dirty = True
688 688 self._updatedfiles.add(f)
689 689 self._map.addfile(
690 690 f,
691 691 mode=mode,
692 692 size=size,
693 693 mtime=mtime,
694 694 added=added,
695 695 merged=merged,
696 696 from_p2=from_p2,
697 697 possibly_dirty=possibly_dirty,
698 698 )
699 699
700 700 def _get_filedata(self, filename):
701 701 """returns"""
702 702 s = os.lstat(self._join(filename))
703 703 mode = s.st_mode
704 704 size = s.st_size
705 705 mtime = s[stat.ST_MTIME]
706 706 return (mode, size, mtime)
707 707
708 708 def _normal(self, f, parentfiledata=None):
709 709 if parentfiledata:
710 710 (mode, size, mtime) = parentfiledata
711 711 else:
712 712 (mode, size, mtime) = self._get_filedata(f)
713 713 self._addpath(f, mode=mode, size=size, mtime=mtime)
714 714 self._map.copymap.pop(f, None)
715 715 if f in self._map.nonnormalset:
716 716 self._map.nonnormalset.remove(f)
717 717 if mtime > self._lastnormaltime:
718 718 # Remember the most recent modification timeslot for status(),
719 719 # to make sure we won't miss future size-preserving file content
720 720 # modifications that happen within the same timeslot.
721 721 self._lastnormaltime = mtime
722 722
723 723 def _normallookup(self, f):
724 724 '''Mark a file normal, but possibly dirty.'''
725 725 if self.in_merge:
726 726 # if there is a merge going on and the file was either
727 727 # "merged" or coming from other parent (-2) before
728 728 # being removed, restore that state.
729 729 entry = self._map.get(f)
730 730 if entry is not None:
731 731 # XXX this should probably be dealt with at a lower level
732 732 # (see `merged_removed` and `from_p2_removed`)
733 733 if entry.merged_removed or entry.from_p2_removed:
734 734 source = self._map.copymap.get(f)
735 735 if entry.merged_removed:
736 736 self._merge(f)
737 737 elif entry.from_p2_removed:
738 738 self._otherparent(f)
739 739 if source is not None:
740 740 self.copy(source, f)
741 741 return
742 742 elif entry.merged or entry.from_p2:
743 743 return
744 744 self._addpath(f, possibly_dirty=True)
745 745 self._map.copymap.pop(f, None)
746 746
747 747 def _otherparent(self, f):
748 748 if not self.in_merge:
749 749 msg = _(b"setting %r to other parent only allowed in merges") % f
750 750 raise error.Abort(msg)
751 751 entry = self._map.get(f)
752 752 if entry is not None and entry.tracked:
753 753 # merge-like
754 754 self._addpath(f, merged=True)
755 755 else:
756 756 # add-like
757 757 self._addpath(f, from_p2=True)
758 758 self._map.copymap.pop(f, None)
759 759
760 760 def _add(self, filename):
761 761 """internal function to mark a file as added"""
762 762 self._addpath(filename, added=True)
763 763 self._map.copymap.pop(filename, None)
764 764
765 765 def _merge(self, f):
766 766 if not self.in_merge:
767 767 return self._normallookup(f)
768 768 return self._otherparent(f)
769 769
770 def drop(self, f):
771 '''Drop a file from the dirstate'''
772 if self.pendingparentchange():
773 util.nouideprecwarn(
774 b"do not use `drop` inside of update/merge context."
775 b" Use `update_file`",
776 b'6.0',
777 stacklevel=2,
778 )
779 else:
780 util.nouideprecwarn(
781 b"do not use `drop` outside of update/merge context."
782 b" Use `set_untracked`",
783 b'6.0',
784 stacklevel=2,
785 )
786 self._drop(f)
787
788 770 def _drop(self, filename):
789 771 """internal function to drop a file from the dirstate"""
790 772 if self._map.dropfile(filename):
791 773 self._dirty = True
792 774 self._updatedfiles.add(filename)
793 775 self._map.copymap.pop(filename, None)
794 776
795 777 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
796 778 if exists is None:
797 779 exists = os.path.lexists(os.path.join(self._root, path))
798 780 if not exists:
799 781 # Maybe a path component exists
800 782 if not ignoremissing and b'/' in path:
801 783 d, f = path.rsplit(b'/', 1)
802 784 d = self._normalize(d, False, ignoremissing, None)
803 785 folded = d + b"/" + f
804 786 else:
805 787 # No path components, preserve original case
806 788 folded = path
807 789 else:
808 790 # recursively normalize leading directory components
809 791 # against dirstate
810 792 if b'/' in normed:
811 793 d, f = normed.rsplit(b'/', 1)
812 794 d = self._normalize(d, False, ignoremissing, True)
813 795 r = self._root + b"/" + d
814 796 folded = d + b"/" + util.fspath(f, r)
815 797 else:
816 798 folded = util.fspath(normed, self._root)
817 799 storemap[normed] = folded
818 800
819 801 return folded
820 802
821 803 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
822 804 normed = util.normcase(path)
823 805 folded = self._map.filefoldmap.get(normed, None)
824 806 if folded is None:
825 807 if isknown:
826 808 folded = path
827 809 else:
828 810 folded = self._discoverpath(
829 811 path, normed, ignoremissing, exists, self._map.filefoldmap
830 812 )
831 813 return folded
832 814
833 815 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
834 816 normed = util.normcase(path)
835 817 folded = self._map.filefoldmap.get(normed, None)
836 818 if folded is None:
837 819 folded = self._map.dirfoldmap.get(normed, None)
838 820 if folded is None:
839 821 if isknown:
840 822 folded = path
841 823 else:
842 824 # store discovered result in dirfoldmap so that future
843 825 # normalizefile calls don't start matching directories
844 826 folded = self._discoverpath(
845 827 path, normed, ignoremissing, exists, self._map.dirfoldmap
846 828 )
847 829 return folded
848 830
849 831 def normalize(self, path, isknown=False, ignoremissing=False):
850 832 """
851 833 normalize the case of a pathname when on a casefolding filesystem
852 834
853 835 isknown specifies whether the filename came from walking the
854 836 disk, to avoid extra filesystem access.
855 837
856 838 If ignoremissing is True, missing paths are returned
857 839 unchanged. Otherwise, we try harder to normalize possibly
858 840 existing path components.
859 841
860 842 The normalized case is determined based on the following precedence:
861 843
862 844 - version of name already stored in the dirstate
863 845 - version of name stored on disk
864 846 - version provided via command arguments
865 847 """
866 848
867 849 if self._checkcase:
868 850 return self._normalize(path, isknown, ignoremissing)
869 851 return path
870 852
871 853 def clear(self):
872 854 self._map.clear()
873 855 self._lastnormaltime = 0
874 856 self._updatedfiles.clear()
875 857 self._dirty = True
876 858
877 859 def rebuild(self, parent, allfiles, changedfiles=None):
878 860 if changedfiles is None:
879 861 # Rebuild entire dirstate
880 862 to_lookup = allfiles
881 863 to_drop = []
882 864 lastnormaltime = self._lastnormaltime
883 865 self.clear()
884 866 self._lastnormaltime = lastnormaltime
885 867 elif len(changedfiles) < 10:
886 868 # Avoid turning allfiles into a set, which can be expensive if it's
887 869 # large.
888 870 to_lookup = []
889 871 to_drop = []
890 872 for f in changedfiles:
891 873 if f in allfiles:
892 874 to_lookup.append(f)
893 875 else:
894 876 to_drop.append(f)
895 877 else:
896 878 changedfilesset = set(changedfiles)
897 879 to_lookup = changedfilesset & set(allfiles)
898 880 to_drop = changedfilesset - to_lookup
899 881
900 882 if self._origpl is None:
901 883 self._origpl = self._pl
902 884 self._map.setparents(parent, self._nodeconstants.nullid)
903 885
904 886 for f in to_lookup:
905 887 self._normallookup(f)
906 888 for f in to_drop:
907 889 self._drop(f)
908 890
909 891 self._dirty = True
910 892
911 893 def identity(self):
912 894 """Return identity of dirstate itself to detect changing in storage
913 895
914 896 If the identity of the previous dirstate is equal to this one, writing
915 897 out changes based on the former dirstate can preserve consistency.
916 898 """
917 899 return self._map.identity
918 900
919 901 def write(self, tr):
920 902 if not self._dirty:
921 903 return
922 904
923 905 filename = self._filename
924 906 if tr:
925 907 # 'dirstate.write()' is not only for writing in-memory
926 908 # changes out, but also for dropping ambiguous timestamps.
927 909 # Delayed writing re-raises the "ambiguous timestamp issue".
928 910 # See also the wiki page below for details:
929 911 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
930 912
931 913 # emulate dropping timestamp in 'parsers.pack_dirstate'
932 914 now = _getfsnow(self._opener)
933 915 self._map.clearambiguoustimes(self._updatedfiles, now)
934 916
935 917 # emulate that all 'dirstate.normal' results are written out
936 918 self._lastnormaltime = 0
937 919 self._updatedfiles.clear()
938 920
939 921 # delay writing in-memory changes out
940 922 tr.addfilegenerator(
941 923 b'dirstate',
942 924 (self._filename,),
943 925 lambda f: self._writedirstate(tr, f),
944 926 location=b'plain',
945 927 )
946 928 return
947 929
948 930 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
949 931 self._writedirstate(tr, st)
950 932
951 933 def addparentchangecallback(self, category, callback):
952 934 """add a callback to be called when the wd parents are changed
953 935
954 936 Callback will be called with the following arguments:
955 937 dirstate, (oldp1, oldp2), (newp1, newp2)
956 938
957 939 Category is a unique identifier to allow overwriting an old callback
958 940 with a newer callback.
959 941 """
960 942 self._plchangecallbacks[category] = callback
961 943
962 944 def _writedirstate(self, tr, st):
963 945 # notify callbacks about parents change
964 946 if self._origpl is not None and self._origpl != self._pl:
965 947 for c, callback in sorted(
966 948 pycompat.iteritems(self._plchangecallbacks)
967 949 ):
968 950 callback(self, self._origpl, self._pl)
969 951 self._origpl = None
970 952 # use the modification time of the newly created temporary file as the
971 953 # filesystem's notion of 'now'
972 954 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
973 955
974 956 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
975 957 # the timestamp of each entry in the dirstate, because of 'now > mtime'
976 958 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
977 959 if delaywrite > 0:
978 960 # do we have any files to delay for?
979 961 for f, e in pycompat.iteritems(self._map):
980 962 if e.need_delay(now):
981 963 import time # to avoid useless import
982 964
983 965 # rather than sleep n seconds, sleep until the next
984 966 # multiple of n seconds
985 967 clock = time.time()
986 968 start = int(clock) - (int(clock) % delaywrite)
987 969 end = start + delaywrite
988 970 time.sleep(end - clock)
989 971 now = end # trust our estimate that the end is near now
990 972 break
991 973
992 974 self._map.write(tr, st, now)
993 975 self._lastnormaltime = 0
994 976 self._dirty = False
995 977
996 978 def _dirignore(self, f):
997 979 if self._ignore(f):
998 980 return True
999 981 for p in pathutil.finddirs(f):
1000 982 if self._ignore(p):
1001 983 return True
1002 984 return False
1003 985
1004 986 def _ignorefiles(self):
1005 987 files = []
1006 988 if os.path.exists(self._join(b'.hgignore')):
1007 989 files.append(self._join(b'.hgignore'))
1008 990 for name, path in self._ui.configitems(b"ui"):
1009 991 if name == b'ignore' or name.startswith(b'ignore.'):
1010 992 # we need to use os.path.join here rather than self._join
1011 993 # because path is arbitrary and user-specified
1012 994 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1013 995 return files
1014 996
1015 997 def _ignorefileandline(self, f):
1016 998 files = collections.deque(self._ignorefiles())
1017 999 visited = set()
1018 1000 while files:
1019 1001 i = files.popleft()
1020 1002 patterns = matchmod.readpatternfile(
1021 1003 i, self._ui.warn, sourceinfo=True
1022 1004 )
1023 1005 for pattern, lineno, line in patterns:
1024 1006 kind, p = matchmod._patsplit(pattern, b'glob')
1025 1007 if kind == b"subinclude":
1026 1008 if p not in visited:
1027 1009 files.append(p)
1028 1010 continue
1029 1011 m = matchmod.match(
1030 1012 self._root, b'', [], [pattern], warn=self._ui.warn
1031 1013 )
1032 1014 if m(f):
1033 1015 return (i, lineno, line)
1034 1016 visited.add(i)
1035 1017 return (None, -1, b"")
1036 1018
1037 1019 def _walkexplicit(self, match, subrepos):
1038 1020 """Get stat data about the files explicitly specified by match.
1039 1021
1040 1022 Return a triple (results, dirsfound, dirsnotfound).
1041 1023 - results is a mapping from filename to stat result. It also contains
1042 1024 listings mapping subrepos and .hg to None.
1043 1025 - dirsfound is a list of files found to be directories.
1044 1026 - dirsnotfound is a list of files that the dirstate thinks are
1045 1027 directories and that were not found."""
1046 1028
1047 1029 def badtype(mode):
1048 1030 kind = _(b'unknown')
1049 1031 if stat.S_ISCHR(mode):
1050 1032 kind = _(b'character device')
1051 1033 elif stat.S_ISBLK(mode):
1052 1034 kind = _(b'block device')
1053 1035 elif stat.S_ISFIFO(mode):
1054 1036 kind = _(b'fifo')
1055 1037 elif stat.S_ISSOCK(mode):
1056 1038 kind = _(b'socket')
1057 1039 elif stat.S_ISDIR(mode):
1058 1040 kind = _(b'directory')
1059 1041 return _(b'unsupported file type (type is %s)') % kind
1060 1042
1061 1043 badfn = match.bad
1062 1044 dmap = self._map
1063 1045 lstat = os.lstat
1064 1046 getkind = stat.S_IFMT
1065 1047 dirkind = stat.S_IFDIR
1066 1048 regkind = stat.S_IFREG
1067 1049 lnkkind = stat.S_IFLNK
1068 1050 join = self._join
1069 1051 dirsfound = []
1070 1052 foundadd = dirsfound.append
1071 1053 dirsnotfound = []
1072 1054 notfoundadd = dirsnotfound.append
1073 1055
1074 1056 if not match.isexact() and self._checkcase:
1075 1057 normalize = self._normalize
1076 1058 else:
1077 1059 normalize = None
1078 1060
1079 1061 files = sorted(match.files())
1080 1062 subrepos.sort()
1081 1063 i, j = 0, 0
1082 1064 while i < len(files) and j < len(subrepos):
1083 1065 subpath = subrepos[j] + b"/"
1084 1066 if files[i] < subpath:
1085 1067 i += 1
1086 1068 continue
1087 1069 while i < len(files) and files[i].startswith(subpath):
1088 1070 del files[i]
1089 1071 j += 1
1090 1072
1091 1073 if not files or b'' in files:
1092 1074 files = [b'']
1093 1075 # constructing the foldmap is expensive, so don't do it for the
1094 1076 # common case where files is ['']
1095 1077 normalize = None
1096 1078 results = dict.fromkeys(subrepos)
1097 1079 results[b'.hg'] = None
1098 1080
1099 1081 for ff in files:
1100 1082 if normalize:
1101 1083 nf = normalize(ff, False, True)
1102 1084 else:
1103 1085 nf = ff
1104 1086 if nf in results:
1105 1087 continue
1106 1088
1107 1089 try:
1108 1090 st = lstat(join(nf))
1109 1091 kind = getkind(st.st_mode)
1110 1092 if kind == dirkind:
1111 1093 if nf in dmap:
1112 1094 # file replaced by dir on disk but still in dirstate
1113 1095 results[nf] = None
1114 1096 foundadd((nf, ff))
1115 1097 elif kind == regkind or kind == lnkkind:
1116 1098 results[nf] = st
1117 1099 else:
1118 1100 badfn(ff, badtype(kind))
1119 1101 if nf in dmap:
1120 1102 results[nf] = None
1121 1103 except OSError as inst: # nf not found on disk - it is dirstate only
1122 1104 if nf in dmap: # does it exactly match a missing file?
1123 1105 results[nf] = None
1124 1106 else: # does it match a missing directory?
1125 1107 if self._map.hasdir(nf):
1126 1108 notfoundadd(nf)
1127 1109 else:
1128 1110 badfn(ff, encoding.strtolocal(inst.strerror))
1129 1111
1130 1112 # match.files() may contain explicitly-specified paths that shouldn't
1131 1113 # be taken; drop them from the list of files found. dirsfound/notfound
1132 1114 # aren't filtered here because they will be tested later.
1133 1115 if match.anypats():
1134 1116 for f in list(results):
1135 1117 if f == b'.hg' or f in subrepos:
1136 1118 # keep sentinel to disable further out-of-repo walks
1137 1119 continue
1138 1120 if not match(f):
1139 1121 del results[f]
1140 1122
1141 1123 # Case insensitive filesystems cannot rely on lstat() failing to detect
1142 1124 # a case-only rename. Prune the stat object for any file that does not
1143 1125 # match the case in the filesystem, if there are multiple files that
1144 1126 # normalize to the same path.
1145 1127 if match.isexact() and self._checkcase:
1146 1128 normed = {}
1147 1129
1148 1130 for f, st in pycompat.iteritems(results):
1149 1131 if st is None:
1150 1132 continue
1151 1133
1152 1134 nc = util.normcase(f)
1153 1135 paths = normed.get(nc)
1154 1136
1155 1137 if paths is None:
1156 1138 paths = set()
1157 1139 normed[nc] = paths
1158 1140
1159 1141 paths.add(f)
1160 1142
1161 1143 for norm, paths in pycompat.iteritems(normed):
1162 1144 if len(paths) > 1:
1163 1145 for path in paths:
1164 1146 folded = self._discoverpath(
1165 1147 path, norm, True, None, self._map.dirfoldmap
1166 1148 )
1167 1149 if path != folded:
1168 1150 results[path] = None
1169 1151
1170 1152 return results, dirsfound, dirsnotfound
1171 1153
1172 1154 def walk(self, match, subrepos, unknown, ignored, full=True):
1173 1155 """
1174 1156 Walk recursively through the directory tree, finding all files
1175 1157 matched by match.
1176 1158
1177 1159 If full is False, maybe skip some known-clean files.
1178 1160
1179 1161 Return a dict mapping filename to stat-like object (either
1180 1162 mercurial.osutil.stat instance or return value of os.stat()).
1181 1163
1182 1164 """
1183 1165 # full is a flag that extensions that hook into walk can use -- this
1184 1166 # implementation doesn't use it at all. This satisfies the contract
1185 1167 # because we only guarantee a "maybe".
1186 1168
1187 1169 if ignored:
1188 1170 ignore = util.never
1189 1171 dirignore = util.never
1190 1172 elif unknown:
1191 1173 ignore = self._ignore
1192 1174 dirignore = self._dirignore
1193 1175 else:
1194 1176 # if not unknown and not ignored, drop dir recursion and step 2
1195 1177 ignore = util.always
1196 1178 dirignore = util.always
1197 1179
1198 1180 matchfn = match.matchfn
1199 1181 matchalways = match.always()
1200 1182 matchtdir = match.traversedir
1201 1183 dmap = self._map
1202 1184 listdir = util.listdir
1203 1185 lstat = os.lstat
1204 1186 dirkind = stat.S_IFDIR
1205 1187 regkind = stat.S_IFREG
1206 1188 lnkkind = stat.S_IFLNK
1207 1189 join = self._join
1208 1190
1209 1191 exact = skipstep3 = False
1210 1192 if match.isexact(): # match.exact
1211 1193 exact = True
1212 1194 dirignore = util.always # skip step 2
1213 1195 elif match.prefix(): # match.match, no patterns
1214 1196 skipstep3 = True
1215 1197
1216 1198 if not exact and self._checkcase:
1217 1199 normalize = self._normalize
1218 1200 normalizefile = self._normalizefile
1219 1201 skipstep3 = False
1220 1202 else:
1221 1203 normalize = self._normalize
1222 1204 normalizefile = None
1223 1205
1224 1206 # step 1: find all explicit files
1225 1207 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1226 1208 if matchtdir:
1227 1209 for d in work:
1228 1210 matchtdir(d[0])
1229 1211 for d in dirsnotfound:
1230 1212 matchtdir(d)
1231 1213
1232 1214 skipstep3 = skipstep3 and not (work or dirsnotfound)
1233 1215 work = [d for d in work if not dirignore(d[0])]
1234 1216
1235 1217 # step 2: visit subdirectories
1236 1218 def traverse(work, alreadynormed):
1237 1219 wadd = work.append
1238 1220 while work:
1239 1221 tracing.counter('dirstate.walk work', len(work))
1240 1222 nd = work.pop()
1241 1223 visitentries = match.visitchildrenset(nd)
1242 1224 if not visitentries:
1243 1225 continue
1244 1226 if visitentries == b'this' or visitentries == b'all':
1245 1227 visitentries = None
1246 1228 skip = None
1247 1229 if nd != b'':
1248 1230 skip = b'.hg'
1249 1231 try:
1250 1232 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1251 1233 entries = listdir(join(nd), stat=True, skip=skip)
1252 1234 except OSError as inst:
1253 1235 if inst.errno in (errno.EACCES, errno.ENOENT):
1254 1236 match.bad(
1255 1237 self.pathto(nd), encoding.strtolocal(inst.strerror)
1256 1238 )
1257 1239 continue
1258 1240 raise
1259 1241 for f, kind, st in entries:
1260 1242 # Some matchers may return files in the visitentries set,
1261 1243 # instead of 'this', if the matcher explicitly mentions them
1262 1244 # and is not an exactmatcher. This is acceptable; we do not
1263 1245 # make any hard assumptions about file-or-directory below
1264 1246 # based on the presence of `f` in visitentries. If
1265 1247 # visitchildrenset returned a set, we can always skip the
1266 1248 # entries *not* in the set it provided regardless of whether
1267 1249 # they're actually a file or a directory.
1268 1250 if visitentries and f not in visitentries:
1269 1251 continue
1270 1252 if normalizefile:
1271 1253 # even though f might be a directory, we're only
1272 1254 # interested in comparing it to files currently in the
1273 1255 # dmap -- therefore normalizefile is enough
1274 1256 nf = normalizefile(
1275 1257 nd and (nd + b"/" + f) or f, True, True
1276 1258 )
1277 1259 else:
1278 1260 nf = nd and (nd + b"/" + f) or f
1279 1261 if nf not in results:
1280 1262 if kind == dirkind:
1281 1263 if not ignore(nf):
1282 1264 if matchtdir:
1283 1265 matchtdir(nf)
1284 1266 wadd(nf)
1285 1267 if nf in dmap and (matchalways or matchfn(nf)):
1286 1268 results[nf] = None
1287 1269 elif kind == regkind or kind == lnkkind:
1288 1270 if nf in dmap:
1289 1271 if matchalways or matchfn(nf):
1290 1272 results[nf] = st
1291 1273 elif (matchalways or matchfn(nf)) and not ignore(
1292 1274 nf
1293 1275 ):
1294 1276 # unknown file -- normalize if necessary
1295 1277 if not alreadynormed:
1296 1278 nf = normalize(nf, False, True)
1297 1279 results[nf] = st
1298 1280 elif nf in dmap and (matchalways or matchfn(nf)):
1299 1281 results[nf] = None
1300 1282
1301 1283 for nd, d in work:
1302 1284 # alreadynormed means that processwork doesn't have to do any
1303 1285 # expensive directory normalization
1304 1286 alreadynormed = not normalize or nd == d
1305 1287 traverse([d], alreadynormed)
1306 1288
1307 1289 for s in subrepos:
1308 1290 del results[s]
1309 1291 del results[b'.hg']
1310 1292
1311 1293 # step 3: visit remaining files from dmap
1312 1294 if not skipstep3 and not exact:
1313 1295 # If a dmap file is not in results yet, it was either
1314 1296 # a) not matching matchfn b) ignored, c) missing, or d) under a
1315 1297 # symlink directory.
1316 1298 if not results and matchalways:
1317 1299 visit = [f for f in dmap]
1318 1300 else:
1319 1301 visit = [f for f in dmap if f not in results and matchfn(f)]
1320 1302 visit.sort()
1321 1303
1322 1304 if unknown:
1323 1305 # unknown == True means we walked all dirs under the roots
1324 1306 # that weren't ignored, and everything that matched was stat'ed
1325 1307 # and is already in results.
1326 1308 # The rest must thus be ignored or under a symlink.
1327 1309 audit_path = pathutil.pathauditor(self._root, cached=True)
1328 1310
1329 1311 for nf in iter(visit):
1330 1312 # If a stat for the same file was already added with a
1331 1313 # different case, don't add one for this, since that would
1332 1314 # make it appear as if the file exists under both names
1333 1315 # on disk.
1334 1316 if (
1335 1317 normalizefile
1336 1318 and normalizefile(nf, True, True) in results
1337 1319 ):
1338 1320 results[nf] = None
1339 1321 # Report ignored items in the dmap as long as they are not
1340 1322 # under a symlink directory.
1341 1323 elif audit_path.check(nf):
1342 1324 try:
1343 1325 results[nf] = lstat(join(nf))
1344 1326 # file was just ignored, no links, and exists
1345 1327 except OSError:
1346 1328 # file doesn't exist
1347 1329 results[nf] = None
1348 1330 else:
1349 1331 # It's either missing or under a symlink directory
1350 1332 # which we in this case report as missing
1351 1333 results[nf] = None
1352 1334 else:
1353 1335 # We may not have walked the full directory tree above,
1354 1336 # so stat and check everything we missed.
1355 1337 iv = iter(visit)
1356 1338 for st in util.statfiles([join(i) for i in visit]):
1357 1339 results[next(iv)] = st
1358 1340 return results
1359 1341
1360 1342 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1361 1343 # Force Rayon (Rust parallelism library) to respect the number of
1362 1344 # workers. This is a temporary workaround until Rust code knows
1363 1345 # how to read the config file.
1364 1346 numcpus = self._ui.configint(b"worker", b"numcpus")
1365 1347 if numcpus is not None:
1366 1348 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1367 1349
1368 1350 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1369 1351 if not workers_enabled:
1370 1352 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1371 1353
1372 1354 (
1373 1355 lookup,
1374 1356 modified,
1375 1357 added,
1376 1358 removed,
1377 1359 deleted,
1378 1360 clean,
1379 1361 ignored,
1380 1362 unknown,
1381 1363 warnings,
1382 1364 bad,
1383 1365 traversed,
1384 1366 dirty,
1385 1367 ) = rustmod.status(
1386 1368 self._map._rustmap,
1387 1369 matcher,
1388 1370 self._rootdir,
1389 1371 self._ignorefiles(),
1390 1372 self._checkexec,
1391 1373 self._lastnormaltime,
1392 1374 bool(list_clean),
1393 1375 bool(list_ignored),
1394 1376 bool(list_unknown),
1395 1377 bool(matcher.traversedir),
1396 1378 )
1397 1379
1398 1380 self._dirty |= dirty
1399 1381
1400 1382 if matcher.traversedir:
1401 1383 for dir in traversed:
1402 1384 matcher.traversedir(dir)
1403 1385
1404 1386 if self._ui.warn:
1405 1387 for item in warnings:
1406 1388 if isinstance(item, tuple):
1407 1389 file_path, syntax = item
1408 1390 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1409 1391 file_path,
1410 1392 syntax,
1411 1393 )
1412 1394 self._ui.warn(msg)
1413 1395 else:
1414 1396 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1415 1397 self._ui.warn(
1416 1398 msg
1417 1399 % (
1418 1400 pathutil.canonpath(
1419 1401 self._rootdir, self._rootdir, item
1420 1402 ),
1421 1403 b"No such file or directory",
1422 1404 )
1423 1405 )
1424 1406
1425 1407 for (fn, message) in bad:
1426 1408 matcher.bad(fn, encoding.strtolocal(message))
1427 1409
1428 1410 status = scmutil.status(
1429 1411 modified=modified,
1430 1412 added=added,
1431 1413 removed=removed,
1432 1414 deleted=deleted,
1433 1415 unknown=unknown,
1434 1416 ignored=ignored,
1435 1417 clean=clean,
1436 1418 )
1437 1419 return (lookup, status)
1438 1420
1439 1421 def status(self, match, subrepos, ignored, clean, unknown):
1440 1422 """Determine the status of the working copy relative to the
1441 1423 dirstate and return a pair of (unsure, status), where status is of type
1442 1424 scmutil.status and:
1443 1425
1444 1426 unsure:
1445 1427 files that might have been modified since the dirstate was
1446 1428 written, but need to be read to be sure (size is the same
1447 1429 but mtime differs)
1448 1430 status.modified:
1449 1431 files that have definitely been modified since the dirstate
1450 1432 was written (different size or mode)
1451 1433 status.clean:
1452 1434 files that have definitely not been modified since the
1453 1435 dirstate was written
1454 1436 """
1455 1437 listignored, listclean, listunknown = ignored, clean, unknown
1456 1438 lookup, modified, added, unknown, ignored = [], [], [], [], []
1457 1439 removed, deleted, clean = [], [], []
1458 1440
1459 1441 dmap = self._map
1460 1442 dmap.preload()
1461 1443
1462 1444 use_rust = True
1463 1445
1464 1446 allowed_matchers = (
1465 1447 matchmod.alwaysmatcher,
1466 1448 matchmod.exactmatcher,
1467 1449 matchmod.includematcher,
1468 1450 )
1469 1451
1470 1452 if rustmod is None:
1471 1453 use_rust = False
1472 1454 elif self._checkcase:
1473 1455 # Case-insensitive filesystems are not handled yet
1474 1456 use_rust = False
1475 1457 elif subrepos:
1476 1458 use_rust = False
1477 1459 elif sparse.enabled:
1478 1460 use_rust = False
1479 1461 elif not isinstance(match, allowed_matchers):
1480 1462 # Some matchers have yet to be implemented
1481 1463 use_rust = False
1482 1464
1483 1465 if use_rust:
1484 1466 try:
1485 1467 return self._rust_status(
1486 1468 match, listclean, listignored, listunknown
1487 1469 )
1488 1470 except rustmod.FallbackError:
1489 1471 pass
1490 1472
1491 1473 def noop(f):
1492 1474 pass
1493 1475
1494 1476 dcontains = dmap.__contains__
1495 1477 dget = dmap.__getitem__
1496 1478 ladd = lookup.append # aka "unsure"
1497 1479 madd = modified.append
1498 1480 aadd = added.append
1499 1481 uadd = unknown.append if listunknown else noop
1500 1482 iadd = ignored.append if listignored else noop
1501 1483 radd = removed.append
1502 1484 dadd = deleted.append
1503 1485 cadd = clean.append if listclean else noop
1504 1486 mexact = match.exact
1505 1487 dirignore = self._dirignore
1506 1488 checkexec = self._checkexec
1507 1489 copymap = self._map.copymap
1508 1490 lastnormaltime = self._lastnormaltime
1509 1491
1510 1492 # We need to do full walks when either
1511 1493 # - we're listing all clean files, or
1512 1494 # - match.traversedir does something, because match.traversedir should
1513 1495 # be called for every dir in the working dir
1514 1496 full = listclean or match.traversedir is not None
1515 1497 for fn, st in pycompat.iteritems(
1516 1498 self.walk(match, subrepos, listunknown, listignored, full=full)
1517 1499 ):
1518 1500 if not dcontains(fn):
1519 1501 if (listignored or mexact(fn)) and dirignore(fn):
1520 1502 if listignored:
1521 1503 iadd(fn)
1522 1504 else:
1523 1505 uadd(fn)
1524 1506 continue
1525 1507
1526 1508 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1527 1509 # written like that for performance reasons. dmap[fn] is not a
1528 1510 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1529 1511 # opcode has fast paths when the value to be unpacked is a tuple or
1530 1512 # a list, but falls back to creating a full-fledged iterator in
1531 1513 # general. That is much slower than simply accessing and storing the
1532 1514 # tuple members one by one.
1533 1515 t = dget(fn)
1534 1516 mode = t.mode
1535 1517 size = t.size
1536 1518 time = t.mtime
1537 1519
1538 1520 if not st and t.tracked:
1539 1521 dadd(fn)
1540 1522 elif t.merged:
1541 1523 madd(fn)
1542 1524 elif t.added:
1543 1525 aadd(fn)
1544 1526 elif t.removed:
1545 1527 radd(fn)
1546 1528 elif t.tracked:
1547 1529 if (
1548 1530 size >= 0
1549 1531 and (
1550 1532 (size != st.st_size and size != st.st_size & _rangemask)
1551 1533 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1552 1534 )
1553 1535 or t.from_p2
1554 1536 or fn in copymap
1555 1537 ):
1556 1538 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1557 1539 # issue6456: Size returned may be longer due to
1558 1540 # encryption on EXT-4 fscrypt, undecided.
1559 1541 ladd(fn)
1560 1542 else:
1561 1543 madd(fn)
1562 1544 elif (
1563 1545 time != st[stat.ST_MTIME]
1564 1546 and time != st[stat.ST_MTIME] & _rangemask
1565 1547 ):
1566 1548 ladd(fn)
1567 1549 elif st[stat.ST_MTIME] == lastnormaltime:
1568 1550 # fn may have just been marked as normal and it may have
1569 1551 # changed in the same second without changing its size.
1570 1552 # This can happen if we quickly do multiple commits.
1571 1553 # Force lookup, so we don't miss such a racy file change.
1572 1554 ladd(fn)
1573 1555 elif listclean:
1574 1556 cadd(fn)
1575 1557 status = scmutil.status(
1576 1558 modified, added, removed, deleted, unknown, ignored, clean
1577 1559 )
1578 1560 return (lookup, status)
1579 1561
1580 1562 def matches(self, match):
1581 1563 """
1582 1564 return files in the dirstate (in whatever state) filtered by match
1583 1565 """
1584 1566 dmap = self._map
1585 1567 if rustmod is not None:
1586 1568 dmap = self._map._rustmap
1587 1569
1588 1570 if match.always():
1589 1571 return dmap.keys()
1590 1572 files = match.files()
1591 1573 if match.isexact():
1592 1574 # fast path -- filter the other way around, since typically files is
1593 1575 # much smaller than dmap
1594 1576 return [f for f in files if f in dmap]
1595 1577 if match.prefix() and all(fn in dmap for fn in files):
1596 1578 # fast path -- all the values are known to be files, so just return
1597 1579 # that
1598 1580 return list(files)
1599 1581 return [f for f in dmap if match(f)]
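The exact-match fast path above filters the small pattern list against the large dirstate map rather than scanning every tracked file. A tiny standalone illustration of that inversion (the sample file names are made up):

dmap_keys = {b'a.txt', b'src/b.py', b'src/c.py'}  # stands in for the dirstate map
exact_patterns = [b'src/b.py', b'missing.txt']    # stands in for match.files()

slow = [f for f in dmap_keys if f in exact_patterns]  # walks every tracked file
fast = [f for f in exact_patterns if f in dmap_keys]  # walks only the patterns

assert set(slow) == set(fast) == {b'src/b.py'}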
1600 1582
1601 1583 def _actualfilename(self, tr):
1602 1584 if tr:
1603 1585 return self._pendingfilename
1604 1586 else:
1605 1587 return self._filename
1606 1588
1607 1589 def savebackup(self, tr, backupname):
1608 1590 '''Save current dirstate into backup file'''
1609 1591 filename = self._actualfilename(tr)
1610 1592 assert backupname != filename
1611 1593
1612 1594 # use '_writedirstate' instead of 'write' so the changes are written out
1613 1595 # unconditionally, because the latter skips writing while a transaction is
1614 1596 # running. The output file is then used to create the dirstate backup.
1615 1597 if self._dirty or not self._opener.exists(filename):
1616 1598 self._writedirstate(
1617 1599 tr,
1618 1600 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1619 1601 )
1620 1602
1621 1603 if tr:
1622 1604 # ensure that subsequent tr.writepending returns True for
1623 1605 # changes written out above, even if dirstate is never
1624 1606 # changed after this
1625 1607 tr.addfilegenerator(
1626 1608 b'dirstate',
1627 1609 (self._filename,),
1628 1610 lambda f: self._writedirstate(tr, f),
1629 1611 location=b'plain',
1630 1612 )
1631 1613
1632 1614 # ensure that the pending file written above is unlinked on
1633 1615 # failure, even if tr.writepending isn't invoked until the
1634 1616 # end of this transaction
1635 1617 tr.registertmp(filename, location=b'plain')
1636 1618
1637 1619 self._opener.tryunlink(backupname)
1638 1620 # hardlink backup is okay because _writedirstate is always called
1639 1621 # with an "atomictemp=True" file.
1640 1622 util.copyfile(
1641 1623 self._opener.join(filename),
1642 1624 self._opener.join(backupname),
1643 1625 hardlink=True,
1644 1626 )
1645 1627
1646 1628 def restorebackup(self, tr, backupname):
1647 1629 '''Restore dirstate by backup file'''
1648 1630 # this "invalidate()" prevents "wlock.release()" from writing
1649 1631 # changes of dirstate out after restoring from backup file
1650 1632 self.invalidate()
1651 1633 filename = self._actualfilename(tr)
1652 1634 o = self._opener
1653 1635 if util.samefile(o.join(backupname), o.join(filename)):
1654 1636 o.unlink(backupname)
1655 1637 else:
1656 1638 o.rename(backupname, filename, checkambig=True)
1657 1639
1658 1640 def clearbackup(self, tr, backupname):
1659 1641 '''Clear backup file'''
1660 1642 self._opener.unlink(backupname)
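Taken together, savebackup, restorebackup and clearbackup form a save/try/restore-or-clear cycle. A hedged sketch of how a caller might combine them; repo, tr, do_risky_update and the backup file name are placeholders, not names from this codebase:

def with_dirstate_backup(repo, tr, do_risky_update):
    backupname = b'dirstate.backup'  # illustrative name only
    repo.dirstate.savebackup(tr, backupname)
    try:
        do_risky_update()
    except Exception:
        # put the dirstate back exactly as it was before the update
        repo.dirstate.restorebackup(tr, backupname)
        raise
    else:
        # the update succeeded; the backup copy is no longer needed
        repo.dirstate.clearbackup(tr, backupname)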
@@ -1,223 +1,220
1 1 from __future__ import absolute_import, print_function
2 2
3 3 import contextlib
4 4
5 5 from . import util as interfaceutil
6 6
7 7
8 8 class idirstate(interfaceutil.Interface):
9 9 def __init__(
10 10 opener,
11 11 ui,
12 12 root,
13 13 validate,
14 14 sparsematchfn,
15 15 nodeconstants,
16 16 use_dirstate_v2,
17 17 ):
18 18 """Create a new dirstate object.
19 19
20 20 opener is an open()-like callable that can be used to open the
21 21 dirstate file; root is the root of the directory tracked by
22 22 the dirstate.
23 23 """
24 24
25 25 # TODO: all these private methods and attributes should be made
26 26 # public or removed from the interface.
27 27 _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
28 28
29 29 def _ignorefiles():
30 30 """Return a list of files containing patterns to ignore."""
31 31
32 32 def _ignorefileandline(f):
33 33 """Given a file `f`, return the ignore file and line that ignores it."""
34 34
35 35 _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""")
36 36 _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
37 37
38 38 @contextlib.contextmanager
39 39 def parentchange():
40 40 """Context manager for handling dirstate parents.
41 41
42 42 If an exception occurs in the scope of the context manager,
43 43 the incoherent dirstate won't be written when wlock is
44 44 released.
45 45 """
46 46
47 47 def pendingparentchange():
48 48 """Returns true if the dirstate is in the middle of a set of changes
49 49 that modify the dirstate parent.
50 50 """
51 51
52 52 def hasdir(d):
53 53 pass
54 54
55 55 def flagfunc(buildfallback):
56 56 pass
57 57
58 58 def getcwd():
59 59 """Return the path from which a canonical path is calculated.
60 60
61 61 This path should be used to resolve file patterns or to convert
62 62 canonical paths back to file paths for display. It shouldn't be
63 63 used to get real file paths. Use vfs functions instead.
64 64 """
65 65
66 66 def pathto(f, cwd=None):
67 67 pass
68 68
69 69 def __getitem__(key):
70 70 """Return the current state of key (a filename) in the dirstate.
71 71
72 72 States are:
73 73 n normal
74 74 m needs merging
75 75 r marked for removal
76 76 a marked for addition
77 77 ? not tracked
78 78 """
79 79
80 80 def __contains__(key):
81 81 """Check if bytestring `key` is known to the dirstate."""
82 82
83 83 def __iter__():
84 84 """Iterate the dirstate's contained filenames as bytestrings."""
85 85
86 86 def items():
87 87 """Iterate the dirstate's entries as (filename, DirstateItem.
88 88
89 89 As usual, filename is a bytestring.
90 90 """
91 91
92 92 iteritems = items
93 93
94 94 def parents():
95 95 pass
96 96
97 97 def p1():
98 98 pass
99 99
100 100 def p2():
101 101 pass
102 102
103 103 def branch():
104 104 pass
105 105
106 106 def setparents(p1, p2=None):
107 107 """Set dirstate parents to p1 and p2.
108 108
109 109 When moving from two parents to one, 'm' merged entries are
110 110 adjusted to normal, and previous copy records are discarded
111 111 and returned by the call.
112 112
113 113 See localrepo.setparents()
114 114 """
115 115
116 116 def setbranch(branch):
117 117 pass
118 118
119 119 def invalidate():
120 120 """Causes the next access to reread the dirstate.
121 121
122 122 This is different from localrepo.invalidatedirstate() because it always
123 123 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
124 124 check whether the dirstate has changed before rereading it."""
125 125
126 126 def copy(source, dest):
127 127 """Mark dest as a copy of source. Unmark dest if source is None."""
128 128
129 129 def copied(file):
130 130 pass
131 131
132 132 def copies():
133 133 pass
134 134
135 def drop(f):
136 '''Drop a file from the dirstate'''
137
138 135 def normalize(path, isknown=False, ignoremissing=False):
139 136 """
140 137 normalize the case of a pathname when on a casefolding filesystem
141 138
142 139 isknown specifies whether the filename came from walking the
143 140 disk, to avoid extra filesystem access.
144 141
145 142 If ignoremissing is True, missing paths are returned
146 143 unchanged. Otherwise, we try harder to normalize possibly
147 144 existing path components.
148 145
149 146 The normalized case is determined based on the following precedence:
150 147
151 148 - version of name already stored in the dirstate
152 149 - version of name stored on disk
153 150 - version provided via command arguments
154 151 """
155 152
156 153 def clear():
157 154 pass
158 155
159 156 def rebuild(parent, allfiles, changedfiles=None):
160 157 pass
161 158
162 159 def identity():
163 160 """Return identity of dirstate it to detect changing in storage
164 161
165 162 If identity of previous dirstate is equal to this, writing
166 163 changes based on the former dirstate out can keep consistency.
167 164 """
168 165
169 166 def write(tr):
170 167 pass
171 168
172 169 def addparentchangecallback(category, callback):
173 170 """add a callback to be called when the wd parents are changed
174 171
175 172 Callback will be called with the following arguments:
176 173 dirstate, (oldp1, oldp2), (newp1, newp2)
177 174
178 175 Category is a unique identifier to allow overwriting an old callback
179 176 with a newer callback.
180 177 """
181 178
182 179 def walk(match, subrepos, unknown, ignored, full=True):
183 180 """
184 181 Walk recursively through the directory tree, finding all files
185 182 matched by match.
186 183
187 184 If full is False, maybe skip some known-clean files.
188 185
189 186 Return a dict mapping filename to stat-like object (either
190 187 mercurial.osutil.stat instance or return value of os.stat()).
191 188
192 189 """
193 190
194 191 def status(match, subrepos, ignored, clean, unknown):
195 192 """Determine the status of the working copy relative to the
196 193 dirstate and return a pair of (unsure, status), where status is of type
197 194 scmutil.status and:
198 195
199 196 unsure:
200 197 files that might have been modified since the dirstate was
201 198 written, but need to be read to be sure (size is the same
202 199 but mtime differs)
203 200 status.modified:
204 201 files that have definitely been modified since the dirstate
205 202 was written (different size or mode)
206 203 status.clean:
207 204 files that have definitely not been modified since the
208 205 dirstate was written
209 206 """
210 207
211 208 def matches(match):
212 209 """
213 210 return files in the dirstate (in whatever state) filtered by match
214 211 """
215 212
216 213 def savebackup(tr, backupname):
217 214 '''Save current dirstate into backup file'''
218 215
219 216 def restorebackup(tr, backupname):
220 217 '''Restore dirstate by backup file'''
221 218
222 219 def clearbackup(tr, backupname):
223 220 '''Clear backup file'''