##// END OF EJS Templates
dirstate: add a `in_merge` property...
marmoute -
r48299:94c58f3a default
parent child Browse files
Show More
@@ -1,1452 +1,1457 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 requires the Rust extensions
SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7FFFFFFF

dirstatetuple = parsers.dirstatetuple


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = dirstatemap.FROM_P2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = dirstatemap.NONNORMAL

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = dirstatemap.AMBIGUOUS_TIME
60 60
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve relative to the repository's .hg/ via the dirstate opener
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve relative to the working-directory root
        return obj._join(fname)
74 74
75 75 def _getfsnow(vfs):
76 76 '''Get "now" timestamp on filesystem'''
77 77 tmpfd, tmpname = vfs.mkstemp()
78 78 try:
79 79 return os.fstat(tmpfd)[stat.ST_MTIME]
80 80 finally:
81 81 os.close(tmpfd)
82 82 vfs.unlink(tmpname)
83 83
84 84
85 85 @interfaceutil.implementer(intdirstate.idirstate)
86 86 class dirstate(object):
87 87 def __init__(
88 88 self,
89 89 opener,
90 90 ui,
91 91 root,
92 92 validate,
93 93 sparsematchfn,
94 94 nodeconstants,
95 95 use_dirstate_v2,
96 96 ):
97 97 """Create a new dirstate object.
98 98
99 99 opener is an open()-like callable that can be used to open the
100 100 dirstate file; root is the root of the directory tracked by
101 101 the dirstate.
102 102 """
103 103 self._use_dirstate_v2 = use_dirstate_v2
104 104 self._nodeconstants = nodeconstants
105 105 self._opener = opener
106 106 self._validate = validate
107 107 self._root = root
108 108 self._sparsematchfn = sparsematchfn
109 109 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
110 110 # UNC path pointing to root share (issue4557)
111 111 self._rootdir = pathutil.normasprefix(root)
112 112 self._dirty = False
113 113 self._lastnormaltime = 0
114 114 self._ui = ui
115 115 self._filecache = {}
116 116 self._parentwriters = 0
117 117 self._filename = b'dirstate'
118 118 self._pendingfilename = b'%s.pending' % self._filename
119 119 self._plchangecallbacks = {}
120 120 self._origpl = None
121 121 self._updatedfiles = set()
122 122 self._mapcls = dirstatemap.dirstatemap
123 123 # Access and cache cwd early, so we don't access it for the first time
124 124 # after a working-copy update caused it to not exist (accessing it then
125 125 # raises an exception).
126 126 self._cwd
127 127
128 128 def prefetch_parents(self):
129 129 """make sure the parents are loaded
130 130
131 131 Used to avoid a race condition.
132 132 """
133 133 self._pl
134 134
135 135 @contextlib.contextmanager
136 136 def parentchange(self):
137 137 """Context manager for handling dirstate parents.
138 138
139 139 If an exception occurs in the scope of the context manager,
140 140 the incoherent dirstate won't be written when wlock is
141 141 released.
142 142 """
143 143 self._parentwriters += 1
144 144 yield
145 145 # Typically we want the "undo" step of a context manager in a
146 146 # finally block so it happens even when an exception
147 147 # occurs. In this case, however, we only want to decrement
148 148 # parentwriters if the code in the with statement exits
149 149 # normally, so we don't have a try/finally here on purpose.
150 150 self._parentwriters -= 1
151 151
152 152 def pendingparentchange(self):
153 153 """Returns true if the dirstate is in the middle of a set of changes
154 154 that modify the dirstate parent.
155 155 """
156 156 return self._parentwriters > 0
157 157
158 158 @propertycache
159 159 def _map(self):
160 160 """Return the dirstate contents (see documentation for dirstatemap)."""
161 161 self._map = self._mapcls(
162 162 self._ui,
163 163 self._opener,
164 164 self._root,
165 165 self._nodeconstants,
166 166 self._use_dirstate_v2,
167 167 )
168 168 return self._map
169 169
170 170 @property
171 171 def _sparsematcher(self):
172 172 """The matcher for the sparse checkout.
173 173
174 174 The working directory may not include every file from a manifest. The
175 175 matcher obtained by this property will match a path if it is to be
176 176 included in the working directory.
177 177 """
178 178 # TODO there is potential to cache this property. For now, the matcher
179 179 # is resolved on every access. (But the called function does use a
180 180 # cache to keep the lookup fast.)
181 181 return self._sparsematchfn()
182 182
183 183 @repocache(b'branch')
184 184 def _branch(self):
185 185 try:
186 186 return self._opener.read(b"branch").strip() or b"default"
187 187 except IOError as inst:
188 188 if inst.errno != errno.ENOENT:
189 189 raise
190 190 return b"default"
191 191
192 192 @property
193 193 def _pl(self):
194 194 return self._map.parents()
195 195
196 196 def hasdir(self, d):
197 197 return self._map.hastrackeddir(d)
198 198
199 199 @rootcache(b'.hgignore')
200 200 def _ignore(self):
201 201 files = self._ignorefiles()
202 202 if not files:
203 203 return matchmod.never()
204 204
205 205 pats = [b'include:%s' % f for f in files]
206 206 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
207 207
208 208 @propertycache
209 209 def _slash(self):
210 210 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
211 211
212 212 @propertycache
213 213 def _checklink(self):
214 214 return util.checklink(self._root)
215 215
216 216 @propertycache
217 217 def _checkexec(self):
218 218 return bool(util.checkexec(self._root))
219 219
220 220 @propertycache
221 221 def _checkcase(self):
222 222 return not util.fscasesensitive(self._join(b'.hg'))
223 223
224 224 def _join(self, f):
225 225 # much faster than os.path.join()
226 226 # it's safe because f is always a relative path
227 227 return self._rootdir + f
228 228
229 229 def flagfunc(self, buildfallback):
230 230 if self._checklink and self._checkexec:
231 231
232 232 def f(x):
233 233 try:
234 234 st = os.lstat(self._join(x))
235 235 if util.statislink(st):
236 236 return b'l'
237 237 if util.statisexec(st):
238 238 return b'x'
239 239 except OSError:
240 240 pass
241 241 return b''
242 242
243 243 return f
244 244
245 245 fallback = buildfallback()
246 246 if self._checklink:
247 247
248 248 def f(x):
249 249 if os.path.islink(self._join(x)):
250 250 return b'l'
251 251 if b'x' in fallback(x):
252 252 return b'x'
253 253 return b''
254 254
255 255 return f
256 256 if self._checkexec:
257 257
258 258 def f(x):
259 259 if b'l' in fallback(x):
260 260 return b'l'
261 261 if util.isexec(self._join(x)):
262 262 return b'x'
263 263 return b''
264 264
265 265 return f
266 266 else:
267 267 return fallback
268 268
269 269 @propertycache
270 270 def _cwd(self):
271 271 # internal config: ui.forcecwd
272 272 forcecwd = self._ui.config(b'ui', b'forcecwd')
273 273 if forcecwd:
274 274 return forcecwd
275 275 return encoding.getcwd()
276 276
277 277 def getcwd(self):
278 278 """Return the path from which a canonical path is calculated.
279 279
280 280 This path should be used to resolve file patterns or to convert
281 281 canonical paths back to file paths for display. It shouldn't be
282 282 used to get real file paths. Use vfs functions instead.
283 283 """
284 284 cwd = self._cwd
285 285 if cwd == self._root:
286 286 return b''
287 287 # self._root ends with a path separator if self._root is '/' or 'C:\'
288 288 rootsep = self._root
289 289 if not util.endswithsep(rootsep):
290 290 rootsep += pycompat.ossep
291 291 if cwd.startswith(rootsep):
292 292 return cwd[len(rootsep) :]
293 293 else:
294 294 # we're outside the repo. return an absolute path.
295 295 return cwd
296 296
297 297 def pathto(self, f, cwd=None):
298 298 if cwd is None:
299 299 cwd = self.getcwd()
300 300 path = util.pathto(self._root, cwd, f)
301 301 if self._slash:
302 302 return util.pconvert(path)
303 303 return path
304 304
305 305 def __getitem__(self, key):
306 306 """Return the current state of key (a filename) in the dirstate.
307 307
308 308 States are:
309 309 n normal
310 310 m needs merging
311 311 r marked for removal
312 312 a marked for addition
313 313 ? not tracked
314 314 """
315 315 return self._map.get(key, (b"?",))[0]
316 316
317 317 def __contains__(self, key):
318 318 return key in self._map
319 319
320 320 def __iter__(self):
321 321 return iter(sorted(self._map))
322 322
323 323 def items(self):
324 324 return pycompat.iteritems(self._map)
325 325
326 326 iteritems = items
327 327
328 328 def directories(self):
329 329 return self._map.directories()
330 330
331 331 def parents(self):
332 332 return [self._validate(p) for p in self._pl]
333 333
334 334 def p1(self):
335 335 return self._validate(self._pl[0])
336 336
337 337 def p2(self):
338 338 return self._validate(self._pl[1])
339 339
340 @property
341 def in_merge(self):
342 """True if a merge is in progress"""
343 return self._pl[1] != self._nodeconstants.nullid
344
340 345 def branch(self):
341 346 return encoding.tolocal(self._branch)
342 347
343 348 def setparents(self, p1, p2=None):
344 349 """Set dirstate parents to p1 and p2.
345 350
346 351 When moving from two parents to one, 'm' merged entries a
347 352 adjusted to normal and previous copy records discarded and
348 353 returned by the call.
349 354
350 355 See localrepo.setparents()
351 356 """
352 357 if p2 is None:
353 358 p2 = self._nodeconstants.nullid
354 359 if self._parentwriters == 0:
355 360 raise ValueError(
356 361 b"cannot set dirstate parent outside of "
357 362 b"dirstate.parentchange context manager"
358 363 )
359 364
360 365 self._dirty = True
361 366 oldp2 = self._pl[1]
362 367 if self._origpl is None:
363 368 self._origpl = self._pl
364 369 self._map.setparents(p1, p2)
365 370 copies = {}
366 371 if (
367 372 oldp2 != self._nodeconstants.nullid
368 373 and p2 == self._nodeconstants.nullid
369 374 ):
370 375 candidatefiles = self._map.non_normal_or_other_parent_paths()
371 376
372 377 for f in candidatefiles:
373 378 s = self._map.get(f)
374 379 if s is None:
375 380 continue
376 381
377 382 # Discard 'm' markers when moving away from a merge state
378 383 if s[0] == b'm':
379 384 source = self._map.copymap.get(f)
380 385 if source:
381 386 copies[f] = source
382 387 self.normallookup(f)
383 388 # Also fix up otherparent markers
384 389 elif s[0] == b'n' and s[2] == FROM_P2:
385 390 source = self._map.copymap.get(f)
386 391 if source:
387 392 copies[f] = source
388 393 self.add(f)
389 394 return copies
390 395
391 396 def setbranch(self, branch):
392 397 self.__class__._branch.set(self, encoding.fromlocal(branch))
393 398 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
394 399 try:
395 400 f.write(self._branch + b'\n')
396 401 f.close()
397 402
398 403 # make sure filecache has the correct stat info for _branch after
399 404 # replacing the underlying file
400 405 ce = self._filecache[b'_branch']
401 406 if ce:
402 407 ce.refresh()
403 408 except: # re-raises
404 409 f.discard()
405 410 raise
406 411
407 412 def invalidate(self):
408 413 """Causes the next access to reread the dirstate.
409 414
410 415 This is different from localrepo.invalidatedirstate() because it always
411 416 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
412 417 check whether the dirstate has changed before rereading it."""
413 418
414 419 for a in ("_map", "_branch", "_ignore"):
415 420 if a in self.__dict__:
416 421 delattr(self, a)
417 422 self._lastnormaltime = 0
418 423 self._dirty = False
419 424 self._updatedfiles.clear()
420 425 self._parentwriters = 0
421 426 self._origpl = None
422 427
423 428 def copy(self, source, dest):
424 429 """Mark dest as a copy of source. Unmark dest if source is None."""
425 430 if source == dest:
426 431 return
427 432 self._dirty = True
428 433 if source is not None:
429 434 self._map.copymap[dest] = source
430 435 self._updatedfiles.add(source)
431 436 self._updatedfiles.add(dest)
432 437 elif self._map.copymap.pop(dest, None):
433 438 self._updatedfiles.add(dest)
434 439
435 440 def copied(self, file):
436 441 return self._map.copymap.get(file, None)
437 442
438 443 def copies(self):
439 444 return self._map.copymap
440 445
441 446 def _addpath(
442 447 self,
443 448 f,
444 449 state,
445 450 mode,
446 451 size=NONNORMAL,
447 452 mtime=AMBIGUOUS_TIME,
448 453 from_p2=False,
449 454 possibly_dirty=False,
450 455 ):
451 456 oldstate = self[f]
452 457 if state == b'a' or oldstate == b'r':
453 458 scmutil.checkfilename(f)
454 459 if self._map.hastrackeddir(f):
455 460 msg = _(b'directory %r already in dirstate')
456 461 msg %= pycompat.bytestr(f)
457 462 raise error.Abort(msg)
458 463 # shadows
459 464 for d in pathutil.finddirs(f):
460 465 if self._map.hastrackeddir(d):
461 466 break
462 467 entry = self._map.get(d)
463 468 if entry is not None and entry[0] != b'r':
464 469 msg = _(b'file %r in dirstate clashes with %r')
465 470 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
466 471 raise error.Abort(msg)
467 472 if state == b'a':
468 473 assert not possibly_dirty
469 474 assert not from_p2
470 475 size = NONNORMAL
471 476 mtime = AMBIGUOUS_TIME
472 477 elif from_p2:
473 478 assert not possibly_dirty
474 479 size = FROM_P2
475 480 mtime = AMBIGUOUS_TIME
476 481 elif possibly_dirty:
477 482 mtime = AMBIGUOUS_TIME
478 483 else:
479 484 assert size != FROM_P2
480 485 assert size != NONNORMAL
481 486 size = size & _rangemask
482 487 mtime = mtime & _rangemask
483 488 self._dirty = True
484 489 self._updatedfiles.add(f)
485 490 self._map.addfile(f, oldstate, state, mode, size, mtime)
486 491
487 492 def normal(self, f, parentfiledata=None):
488 493 """Mark a file normal and clean.
489 494
490 495 parentfiledata: (mode, size, mtime) of the clean file
491 496
492 497 parentfiledata should be computed from memory (for mode,
493 498 size), as or close as possible from the point where we
494 499 determined the file was clean, to limit the risk of the
495 500 file having been changed by an external process between the
496 501 moment where the file was determined to be clean and now."""
497 502 if parentfiledata:
498 503 (mode, size, mtime) = parentfiledata
499 504 else:
500 505 s = os.lstat(self._join(f))
501 506 mode = s.st_mode
502 507 size = s.st_size
503 508 mtime = s[stat.ST_MTIME]
504 509 self._addpath(f, b'n', mode, size, mtime)
505 510 self._map.copymap.pop(f, None)
506 511 if f in self._map.nonnormalset:
507 512 self._map.nonnormalset.remove(f)
508 513 if mtime > self._lastnormaltime:
509 514 # Remember the most recent modification timeslot for status(),
510 515 # to make sure we won't miss future size-preserving file content
511 516 # modifications that happen within the same timeslot.
512 517 self._lastnormaltime = mtime
513 518
514 519 def normallookup(self, f):
515 520 '''Mark a file normal, but possibly dirty.'''
516 if self._pl[1] != self._nodeconstants.nullid:
521 if self.in_merge:
517 522 # if there is a merge going on and the file was either
518 523 # in state 'm' (-1) or coming from other parent (-2) before
519 524 # being removed, restore that state.
520 525 entry = self._map.get(f)
521 526 if entry is not None:
522 527 if entry[0] == b'r' and entry[2] in (NONNORMAL, FROM_P2):
523 528 source = self._map.copymap.get(f)
524 529 if entry[2] == NONNORMAL:
525 530 self.merge(f)
526 531 elif entry[2] == FROM_P2:
527 532 self.otherparent(f)
528 533 if source:
529 534 self.copy(source, f)
530 535 return
531 536 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == FROM_P2:
532 537 return
533 538 self._addpath(f, b'n', 0, possibly_dirty=True)
534 539 self._map.copymap.pop(f, None)
535 540
536 541 def otherparent(self, f):
537 542 '''Mark as coming from the other parent, always dirty.'''
538 if self._pl[1] == self._nodeconstants.nullid:
543 if not self.in_merge:
539 544 msg = _(b"setting %r to other parent only allowed in merges") % f
540 545 raise error.Abort(msg)
541 546 if f in self and self[f] == b'n':
542 547 # merge-like
543 548 self._addpath(f, b'm', 0, from_p2=True)
544 549 else:
545 550 # add-like
546 551 self._addpath(f, b'n', 0, from_p2=True)
547 552 self._map.copymap.pop(f, None)
548 553
549 554 def add(self, f):
550 555 '''Mark a file added.'''
551 556 self._addpath(f, b'a', 0)
552 557 self._map.copymap.pop(f, None)
553 558
554 559 def remove(self, f):
555 560 '''Mark a file removed.'''
556 561 self._dirty = True
557 562 oldstate = self[f]
558 563 size = 0
559 if self._pl[1] != self._nodeconstants.nullid:
564 if self.in_merge:
560 565 entry = self._map.get(f)
561 566 if entry is not None:
562 567 # backup the previous state
563 568 if entry[0] == b'm': # merge
564 569 size = NONNORMAL
565 570 elif entry[0] == b'n' and entry[2] == FROM_P2: # other parent
566 571 size = FROM_P2
567 572 self._map.otherparentset.add(f)
568 573 self._updatedfiles.add(f)
569 574 self._map.removefile(f, oldstate, size)
570 575 if size == 0:
571 576 self._map.copymap.pop(f, None)
572 577
573 578 def merge(self, f):
574 579 '''Mark a file merged.'''
575 if self._pl[1] == self._nodeconstants.nullid:
580 if not self.in_merge:
576 581 return self.normallookup(f)
577 582 return self.otherparent(f)
578 583
579 584 def drop(self, f):
580 585 '''Drop a file from the dirstate'''
581 586 oldstate = self[f]
582 587 if self._map.dropfile(f, oldstate):
583 588 self._dirty = True
584 589 self._updatedfiles.add(f)
585 590 self._map.copymap.pop(f, None)
586 591
587 592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
588 593 if exists is None:
589 594 exists = os.path.lexists(os.path.join(self._root, path))
590 595 if not exists:
591 596 # Maybe a path component exists
592 597 if not ignoremissing and b'/' in path:
593 598 d, f = path.rsplit(b'/', 1)
594 599 d = self._normalize(d, False, ignoremissing, None)
595 600 folded = d + b"/" + f
596 601 else:
597 602 # No path components, preserve original case
598 603 folded = path
599 604 else:
600 605 # recursively normalize leading directory components
601 606 # against dirstate
602 607 if b'/' in normed:
603 608 d, f = normed.rsplit(b'/', 1)
604 609 d = self._normalize(d, False, ignoremissing, True)
605 610 r = self._root + b"/" + d
606 611 folded = d + b"/" + util.fspath(f, r)
607 612 else:
608 613 folded = util.fspath(normed, self._root)
609 614 storemap[normed] = folded
610 615
611 616 return folded
612 617
613 618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
614 619 normed = util.normcase(path)
615 620 folded = self._map.filefoldmap.get(normed, None)
616 621 if folded is None:
617 622 if isknown:
618 623 folded = path
619 624 else:
620 625 folded = self._discoverpath(
621 626 path, normed, ignoremissing, exists, self._map.filefoldmap
622 627 )
623 628 return folded
624 629
625 630 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
626 631 normed = util.normcase(path)
627 632 folded = self._map.filefoldmap.get(normed, None)
628 633 if folded is None:
629 634 folded = self._map.dirfoldmap.get(normed, None)
630 635 if folded is None:
631 636 if isknown:
632 637 folded = path
633 638 else:
634 639 # store discovered result in dirfoldmap so that future
635 640 # normalizefile calls don't start matching directories
636 641 folded = self._discoverpath(
637 642 path, normed, ignoremissing, exists, self._map.dirfoldmap
638 643 )
639 644 return folded
640 645
641 646 def normalize(self, path, isknown=False, ignoremissing=False):
642 647 """
643 648 normalize the case of a pathname when on a casefolding filesystem
644 649
645 650 isknown specifies whether the filename came from walking the
646 651 disk, to avoid extra filesystem access.
647 652
648 653 If ignoremissing is True, missing path are returned
649 654 unchanged. Otherwise, we try harder to normalize possibly
650 655 existing path components.
651 656
652 657 The normalized case is determined based on the following precedence:
653 658
654 659 - version of name already stored in the dirstate
655 660 - version of name stored on disk
656 661 - version provided via command arguments
657 662 """
658 663
659 664 if self._checkcase:
660 665 return self._normalize(path, isknown, ignoremissing)
661 666 return path
662 667
663 668 def clear(self):
664 669 self._map.clear()
665 670 self._lastnormaltime = 0
666 671 self._updatedfiles.clear()
667 672 self._dirty = True
668 673
669 674 def rebuild(self, parent, allfiles, changedfiles=None):
670 675 if changedfiles is None:
671 676 # Rebuild entire dirstate
672 677 to_lookup = allfiles
673 678 to_drop = []
674 679 lastnormaltime = self._lastnormaltime
675 680 self.clear()
676 681 self._lastnormaltime = lastnormaltime
677 682 elif len(changedfiles) < 10:
678 683 # Avoid turning allfiles into a set, which can be expensive if it's
679 684 # large.
680 685 to_lookup = []
681 686 to_drop = []
682 687 for f in changedfiles:
683 688 if f in allfiles:
684 689 to_lookup.append(f)
685 690 else:
686 691 to_drop.append(f)
687 692 else:
688 693 changedfilesset = set(changedfiles)
689 694 to_lookup = changedfilesset & set(allfiles)
690 695 to_drop = changedfilesset - to_lookup
691 696
692 697 if self._origpl is None:
693 698 self._origpl = self._pl
694 699 self._map.setparents(parent, self._nodeconstants.nullid)
695 700
696 701 for f in to_lookup:
697 702 self.normallookup(f)
698 703 for f in to_drop:
699 704 self.drop(f)
700 705
701 706 self._dirty = True
702 707
703 708 def identity(self):
704 709 """Return identity of dirstate itself to detect changing in storage
705 710
706 711 If identity of previous dirstate is equal to this, writing
707 712 changes based on the former dirstate out can keep consistency.
708 713 """
709 714 return self._map.identity
710 715
711 716 def write(self, tr):
712 717 if not self._dirty:
713 718 return
714 719
715 720 filename = self._filename
716 721 if tr:
717 722 # 'dirstate.write()' is not only for writing in-memory
718 723 # changes out, but also for dropping ambiguous timestamp.
719 724 # delayed writing re-raise "ambiguous timestamp issue".
720 725 # See also the wiki page below for detail:
721 726 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
722 727
723 728 # emulate dropping timestamp in 'parsers.pack_dirstate'
724 729 now = _getfsnow(self._opener)
725 730 self._map.clearambiguoustimes(self._updatedfiles, now)
726 731
727 732 # emulate that all 'dirstate.normal' results are written out
728 733 self._lastnormaltime = 0
729 734 self._updatedfiles.clear()
730 735
731 736 # delay writing in-memory changes out
732 737 tr.addfilegenerator(
733 738 b'dirstate',
734 739 (self._filename,),
735 740 self._writedirstate,
736 741 location=b'plain',
737 742 )
738 743 return
739 744
740 745 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
741 746 self._writedirstate(st)
742 747
743 748 def addparentchangecallback(self, category, callback):
744 749 """add a callback to be called when the wd parents are changed
745 750
746 751 Callback will be called with the following arguments:
747 752 dirstate, (oldp1, oldp2), (newp1, newp2)
748 753
749 754 Category is a unique identifier to allow overwriting an old callback
750 755 with a newer callback.
751 756 """
752 757 self._plchangecallbacks[category] = callback
753 758
754 759 def _writedirstate(self, st):
755 760 # notify callbacks about parents change
756 761 if self._origpl is not None and self._origpl != self._pl:
757 762 for c, callback in sorted(
758 763 pycompat.iteritems(self._plchangecallbacks)
759 764 ):
760 765 callback(self, self._origpl, self._pl)
761 766 self._origpl = None
762 767 # use the modification time of the newly created temporary file as the
763 768 # filesystem's notion of 'now'
764 769 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
765 770
766 771 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
767 772 # timestamp of each entries in dirstate, because of 'now > mtime'
768 773 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
769 774 if delaywrite > 0:
770 775 # do we have any files to delay for?
771 776 for f, e in pycompat.iteritems(self._map):
772 777 if e[0] == b'n' and e[3] == now:
773 778 import time # to avoid useless import
774 779
775 780 # rather than sleep n seconds, sleep until the next
776 781 # multiple of n seconds
777 782 clock = time.time()
778 783 start = int(clock) - (int(clock) % delaywrite)
779 784 end = start + delaywrite
780 785 time.sleep(end - clock)
781 786 now = end # trust our estimate that the end is near now
782 787 break
783 788
784 789 self._map.write(st, now)
785 790 self._lastnormaltime = 0
786 791 self._dirty = False
787 792
788 793 def _dirignore(self, f):
789 794 if self._ignore(f):
790 795 return True
791 796 for p in pathutil.finddirs(f):
792 797 if self._ignore(p):
793 798 return True
794 799 return False
795 800
796 801 def _ignorefiles(self):
797 802 files = []
798 803 if os.path.exists(self._join(b'.hgignore')):
799 804 files.append(self._join(b'.hgignore'))
800 805 for name, path in self._ui.configitems(b"ui"):
801 806 if name == b'ignore' or name.startswith(b'ignore.'):
802 807 # we need to use os.path.join here rather than self._join
803 808 # because path is arbitrary and user-specified
804 809 files.append(os.path.join(self._rootdir, util.expandpath(path)))
805 810 return files
806 811
807 812 def _ignorefileandline(self, f):
808 813 files = collections.deque(self._ignorefiles())
809 814 visited = set()
810 815 while files:
811 816 i = files.popleft()
812 817 patterns = matchmod.readpatternfile(
813 818 i, self._ui.warn, sourceinfo=True
814 819 )
815 820 for pattern, lineno, line in patterns:
816 821 kind, p = matchmod._patsplit(pattern, b'glob')
817 822 if kind == b"subinclude":
818 823 if p not in visited:
819 824 files.append(p)
820 825 continue
821 826 m = matchmod.match(
822 827 self._root, b'', [], [pattern], warn=self._ui.warn
823 828 )
824 829 if m(f):
825 830 return (i, lineno, line)
826 831 visited.add(i)
827 832 return (None, -1, b"")
828 833
829 834 def _walkexplicit(self, match, subrepos):
830 835 """Get stat data about the files explicitly specified by match.
831 836
832 837 Return a triple (results, dirsfound, dirsnotfound).
833 838 - results is a mapping from filename to stat result. It also contains
834 839 listings mapping subrepos and .hg to None.
835 840 - dirsfound is a list of files found to be directories.
836 841 - dirsnotfound is a list of files that the dirstate thinks are
837 842 directories and that were not found."""
838 843
839 844 def badtype(mode):
840 845 kind = _(b'unknown')
841 846 if stat.S_ISCHR(mode):
842 847 kind = _(b'character device')
843 848 elif stat.S_ISBLK(mode):
844 849 kind = _(b'block device')
845 850 elif stat.S_ISFIFO(mode):
846 851 kind = _(b'fifo')
847 852 elif stat.S_ISSOCK(mode):
848 853 kind = _(b'socket')
849 854 elif stat.S_ISDIR(mode):
850 855 kind = _(b'directory')
851 856 return _(b'unsupported file type (type is %s)') % kind
852 857
853 858 badfn = match.bad
854 859 dmap = self._map
855 860 lstat = os.lstat
856 861 getkind = stat.S_IFMT
857 862 dirkind = stat.S_IFDIR
858 863 regkind = stat.S_IFREG
859 864 lnkkind = stat.S_IFLNK
860 865 join = self._join
861 866 dirsfound = []
862 867 foundadd = dirsfound.append
863 868 dirsnotfound = []
864 869 notfoundadd = dirsnotfound.append
865 870
866 871 if not match.isexact() and self._checkcase:
867 872 normalize = self._normalize
868 873 else:
869 874 normalize = None
870 875
871 876 files = sorted(match.files())
872 877 subrepos.sort()
873 878 i, j = 0, 0
874 879 while i < len(files) and j < len(subrepos):
875 880 subpath = subrepos[j] + b"/"
876 881 if files[i] < subpath:
877 882 i += 1
878 883 continue
879 884 while i < len(files) and files[i].startswith(subpath):
880 885 del files[i]
881 886 j += 1
882 887
883 888 if not files or b'' in files:
884 889 files = [b'']
885 890 # constructing the foldmap is expensive, so don't do it for the
886 891 # common case where files is ['']
887 892 normalize = None
888 893 results = dict.fromkeys(subrepos)
889 894 results[b'.hg'] = None
890 895
891 896 for ff in files:
892 897 if normalize:
893 898 nf = normalize(ff, False, True)
894 899 else:
895 900 nf = ff
896 901 if nf in results:
897 902 continue
898 903
899 904 try:
900 905 st = lstat(join(nf))
901 906 kind = getkind(st.st_mode)
902 907 if kind == dirkind:
903 908 if nf in dmap:
904 909 # file replaced by dir on disk but still in dirstate
905 910 results[nf] = None
906 911 foundadd((nf, ff))
907 912 elif kind == regkind or kind == lnkkind:
908 913 results[nf] = st
909 914 else:
910 915 badfn(ff, badtype(kind))
911 916 if nf in dmap:
912 917 results[nf] = None
913 918 except OSError as inst: # nf not found on disk - it is dirstate only
914 919 if nf in dmap: # does it exactly match a missing file?
915 920 results[nf] = None
916 921 else: # does it match a missing directory?
917 922 if self._map.hasdir(nf):
918 923 notfoundadd(nf)
919 924 else:
920 925 badfn(ff, encoding.strtolocal(inst.strerror))
921 926
922 927 # match.files() may contain explicitly-specified paths that shouldn't
923 928 # be taken; drop them from the list of files found. dirsfound/notfound
924 929 # aren't filtered here because they will be tested later.
925 930 if match.anypats():
926 931 for f in list(results):
927 932 if f == b'.hg' or f in subrepos:
928 933 # keep sentinel to disable further out-of-repo walks
929 934 continue
930 935 if not match(f):
931 936 del results[f]
932 937
933 938 # Case insensitive filesystems cannot rely on lstat() failing to detect
934 939 # a case-only rename. Prune the stat object for any file that does not
935 940 # match the case in the filesystem, if there are multiple files that
936 941 # normalize to the same path.
937 942 if match.isexact() and self._checkcase:
938 943 normed = {}
939 944
940 945 for f, st in pycompat.iteritems(results):
941 946 if st is None:
942 947 continue
943 948
944 949 nc = util.normcase(f)
945 950 paths = normed.get(nc)
946 951
947 952 if paths is None:
948 953 paths = set()
949 954 normed[nc] = paths
950 955
951 956 paths.add(f)
952 957
953 958 for norm, paths in pycompat.iteritems(normed):
954 959 if len(paths) > 1:
955 960 for path in paths:
956 961 folded = self._discoverpath(
957 962 path, norm, True, None, self._map.dirfoldmap
958 963 )
959 964 if path != folded:
960 965 results[path] = None
961 966
962 967 return results, dirsfound, dirsnotfound
963 968
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates according to what the caller asked
        # to list: listing ignored files means nothing is ignored; listing
        # unknown files uses the real ignore rules; otherwise everything
        # outside the dirstate is treated as ignored.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist frequently-used attributes into locals for the hot
        # traversal loop below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        # on case-sensitive filesystems (or with an exact matcher) no
        # per-file normalization is needed during traversal
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    # never descend into the .hg metadata directory
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable or vanished directory: report via the
                    # matcher's bad() callback and keep walking
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a dirstate entry with a directory's name means
                            # the tracked file was replaced by a directory
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked file of an unusual kind (fifo, socket,
                            # device, ...): record as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # subrepo roots and the .hg sentinel must never appear in the result
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1151 1156
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as ``status()``:
        ``lookup`` is the list of files that need a content read to decide,
        and ``status`` is a ``scmutil.status`` instance.

        May raise ``rustmod.FallbackError`` (caught by ``status()``) when
        the Rust side cannot handle the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # single-threaded Rayon when workers are disabled
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # the Rust side returns one flat tuple covering results, diagnostics
        # and a flag telling us whether it mutated the dirstate map
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust status may have updated entries in the map; remember that
        # the dirstate needs to be written out
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid ignore-pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        # forward per-file errors to the matcher's bad() callback
        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1230 1235
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's flags under distinct names; the parameter names
        # are about to be reused as result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # try the Rust fast path unless some unsupported feature is in play
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust bailed out at runtime; fall through to pure Python
                pass

        def noop(f):
            pass

        # bind hot callables to locals; appends for disabled categories are
        # routed to noop so the loop body stays branch-free
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            # dispatch on the single-byte dirstate state:
            # n=normal, m=merged, a=added, r=removed
            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == FROM_P2  # other parent
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1372 1377
1373 1378 def matches(self, match):
1374 1379 """
1375 1380 return files in the dirstate (in whatever state) filtered by match
1376 1381 """
1377 1382 dmap = self._map
1378 1383 if rustmod is not None:
1379 1384 dmap = self._map._rustmap
1380 1385
1381 1386 if match.always():
1382 1387 return dmap.keys()
1383 1388 files = match.files()
1384 1389 if match.isexact():
1385 1390 # fast path -- filter the other way around, since typically files is
1386 1391 # much smaller than dmap
1387 1392 return [f for f in files if f in dmap]
1388 1393 if match.prefix() and all(fn in dmap for fn in files):
1389 1394 # fast path -- all the values are known to be files, so just return
1390 1395 # that
1391 1396 return list(files)
1392 1397 return [f for f in dmap if match(f)]
1393 1398
1394 1399 def _actualfilename(self, tr):
1395 1400 if tr:
1396 1401 return self._pendingfilename
1397 1402 else:
1398 1403 return self._filename
1399 1404
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        # while a transaction is running the live data is in the pending file
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1437 1442
1438 1443 def restorebackup(self, tr, backupname):
1439 1444 '''Restore dirstate by backup file'''
1440 1445 # this "invalidate()" prevents "wlock.release()" from writing
1441 1446 # changes of dirstate out after restoring from backup file
1442 1447 self.invalidate()
1443 1448 filename = self._actualfilename(tr)
1444 1449 o = self._opener
1445 1450 if util.samefile(o.join(backupname), o.join(filename)):
1446 1451 o.unlink(backupname)
1447 1452 else:
1448 1453 o.rename(backupname, filename, checkambig=True)
1449 1454
1450 1455 def clearbackup(self, tr, backupname):
1451 1456 '''Clear backup file'''
1452 1457 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now