dirstate: move special handling of files==['.'] together...
Martin von Zweigbergk
r42527:7ada5989 default
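
The only functional hunk in this changeset is in dirstate._walkexplicit() (around lines 760-770 of the new file): the "normalize = None" shortcut and its explanatory comment move up next to the place where `files` is forced to ['.'], so the per-file loop no longer needs its own `ff != '.'` test. The sketch below restates that rearranged control flow in isolation; it is a simplified illustration with made-up names, not Mercurial's actual API, and the lstat/foldmap work is reduced to placeholders.

# Simplified sketch of the rearranged _walkexplicit() logic (illustrative only).
def walk_explicit_sketch(files, normalize):
    if not files or '.' in files:
        files = ['.']
        # constructing the foldmap is expensive, so don't do it for the
        # common case where files is ['.'] -- the decision is now made once,
        # up front, instead of per file inside the loop
        normalize = None
    results = {}
    for ff in files:
        # the loop body no longer special-cases ff == '.'
        nf = normalize(ff) if normalize else ff
        results[nf] = None  # placeholder for the real lstat()-based handling
    return results
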
@@ -1,1524 +1,1525 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 try:
31 31 from . import rustext
32 32 rustext.__name__ # force actual import (see hgdemandimport)
33 33 except ImportError:
34 34 rustext = None
35 35
36 36 parsers = policy.importmod(r'parsers')
37 37
38 38 propertycache = util.propertycache
39 39 filecache = scmutil.filecache
40 40 _rangemask = 0x7fffffff
41 41
42 42 dirstatetuple = parsers.dirstatetuple
43 43
44 44 class repocache(filecache):
45 45 """filecache for files in .hg/"""
46 46 def join(self, obj, fname):
47 47 return obj._opener.join(fname)
48 48
49 49 class rootcache(filecache):
50 50 """filecache for files in the repository root"""
51 51 def join(self, obj, fname):
52 52 return obj._join(fname)
53 53
54 54 def _getfsnow(vfs):
55 55 '''Get "now" timestamp on filesystem'''
56 56 tmpfd, tmpname = vfs.mkstemp()
57 57 try:
58 58 return os.fstat(tmpfd)[stat.ST_MTIME]
59 59 finally:
60 60 os.close(tmpfd)
61 61 vfs.unlink(tmpname)
62 62
63 63 class dirstate(object):
64 64
65 65 def __init__(self, opener, ui, root, validate, sparsematchfn):
66 66 '''Create a new dirstate object.
67 67
68 68 opener is an open()-like callable that can be used to open the
69 69 dirstate file; root is the root of the directory tracked by
70 70 the dirstate.
71 71 '''
72 72 self._opener = opener
73 73 self._validate = validate
74 74 self._root = root
75 75 self._sparsematchfn = sparsematchfn
76 76 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
77 77 # UNC path pointing to root share (issue4557)
78 78 self._rootdir = pathutil.normasprefix(root)
79 79 self._dirty = False
80 80 self._lastnormaltime = 0
81 81 self._ui = ui
82 82 self._filecache = {}
83 83 self._parentwriters = 0
84 84 self._filename = 'dirstate'
85 85 self._pendingfilename = '%s.pending' % self._filename
86 86 self._plchangecallbacks = {}
87 87 self._origpl = None
88 88 self._updatedfiles = set()
89 89 self._mapcls = dirstatemap
90 90 # Access and cache cwd early, so we don't access it for the first time
91 91 # after a working-copy update caused it to not exist (accessing it then
92 92 # raises an exception).
93 93 self._cwd
94 94
95 95 @contextlib.contextmanager
96 96 def parentchange(self):
97 97 '''Context manager for handling dirstate parents.
98 98
99 99 If an exception occurs in the scope of the context manager,
100 100 the incoherent dirstate won't be written when wlock is
101 101 released.
102 102 '''
103 103 self._parentwriters += 1
104 104 yield
105 105 # Typically we want the "undo" step of a context manager in a
106 106 # finally block so it happens even when an exception
107 107 # occurs. In this case, however, we only want to decrement
108 108 # parentwriters if the code in the with statement exits
109 109 # normally, so we don't have a try/finally here on purpose.
110 110 self._parentwriters -= 1
111 111
112 112 def pendingparentchange(self):
113 113 '''Returns true if the dirstate is in the middle of a set of changes
114 114 that modify the dirstate parent.
115 115 '''
116 116 return self._parentwriters > 0
117 117
118 118 @propertycache
119 119 def _map(self):
120 120 """Return the dirstate contents (see documentation for dirstatemap)."""
121 121 self._map = self._mapcls(self._ui, self._opener, self._root)
122 122 return self._map
123 123
124 124 @property
125 125 def _sparsematcher(self):
126 126 """The matcher for the sparse checkout.
127 127
128 128 The working directory may not include every file from a manifest. The
129 129 matcher obtained by this property will match a path if it is to be
130 130 included in the working directory.
131 131 """
132 132 # TODO there is potential to cache this property. For now, the matcher
133 133 # is resolved on every access. (But the called function does use a
134 134 # cache to keep the lookup fast.)
135 135 return self._sparsematchfn()
136 136
137 137 @repocache('branch')
138 138 def _branch(self):
139 139 try:
140 140 return self._opener.read("branch").strip() or "default"
141 141 except IOError as inst:
142 142 if inst.errno != errno.ENOENT:
143 143 raise
144 144 return "default"
145 145
146 146 @property
147 147 def _pl(self):
148 148 return self._map.parents()
149 149
150 150 def hasdir(self, d):
151 151 return self._map.hastrackeddir(d)
152 152
153 153 @rootcache('.hgignore')
154 154 def _ignore(self):
155 155 files = self._ignorefiles()
156 156 if not files:
157 157 return matchmod.never()
158 158
159 159 pats = ['include:%s' % f for f in files]
160 160 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
161 161
162 162 @propertycache
163 163 def _slash(self):
164 164 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
165 165
166 166 @propertycache
167 167 def _checklink(self):
168 168 return util.checklink(self._root)
169 169
170 170 @propertycache
171 171 def _checkexec(self):
172 172 return util.checkexec(self._root)
173 173
174 174 @propertycache
175 175 def _checkcase(self):
176 176 return not util.fscasesensitive(self._join('.hg'))
177 177
178 178 def _join(self, f):
179 179 # much faster than os.path.join()
180 180 # it's safe because f is always a relative path
181 181 return self._rootdir + f
182 182
183 183 def flagfunc(self, buildfallback):
184 184 if self._checklink and self._checkexec:
185 185 def f(x):
186 186 try:
187 187 st = os.lstat(self._join(x))
188 188 if util.statislink(st):
189 189 return 'l'
190 190 if util.statisexec(st):
191 191 return 'x'
192 192 except OSError:
193 193 pass
194 194 return ''
195 195 return f
196 196
197 197 fallback = buildfallback()
198 198 if self._checklink:
199 199 def f(x):
200 200 if os.path.islink(self._join(x)):
201 201 return 'l'
202 202 if 'x' in fallback(x):
203 203 return 'x'
204 204 return ''
205 205 return f
206 206 if self._checkexec:
207 207 def f(x):
208 208 if 'l' in fallback(x):
209 209 return 'l'
210 210 if util.isexec(self._join(x)):
211 211 return 'x'
212 212 return ''
213 213 return f
214 214 else:
215 215 return fallback
216 216
217 217 @propertycache
218 218 def _cwd(self):
219 219 # internal config: ui.forcecwd
220 220 forcecwd = self._ui.config('ui', 'forcecwd')
221 221 if forcecwd:
222 222 return forcecwd
223 223 return encoding.getcwd()
224 224
225 225 def getcwd(self):
226 226 '''Return the path from which a canonical path is calculated.
227 227
228 228 This path should be used to resolve file patterns or to convert
229 229 canonical paths back to file paths for display. It shouldn't be
230 230 used to get real file paths. Use vfs functions instead.
231 231 '''
232 232 cwd = self._cwd
233 233 if cwd == self._root:
234 234 return ''
235 235 # self._root ends with a path separator if self._root is '/' or 'C:\'
236 236 rootsep = self._root
237 237 if not util.endswithsep(rootsep):
238 238 rootsep += pycompat.ossep
239 239 if cwd.startswith(rootsep):
240 240 return cwd[len(rootsep):]
241 241 else:
242 242 # we're outside the repo. return an absolute path.
243 243 return cwd
244 244
245 245 def pathto(self, f, cwd=None):
246 246 if cwd is None:
247 247 cwd = self.getcwd()
248 248 path = util.pathto(self._root, cwd, f)
249 249 if self._slash:
250 250 return util.pconvert(path)
251 251 return path
252 252
253 253 def __getitem__(self, key):
254 254 '''Return the current state of key (a filename) in the dirstate.
255 255
256 256 States are:
257 257 n normal
258 258 m needs merging
259 259 r marked for removal
260 260 a marked for addition
261 261 ? not tracked
262 262 '''
263 263 return self._map.get(key, ("?",))[0]
264 264
265 265 def __contains__(self, key):
266 266 return key in self._map
267 267
268 268 def __iter__(self):
269 269 return iter(sorted(self._map))
270 270
271 271 def items(self):
272 272 return self._map.iteritems()
273 273
274 274 iteritems = items
275 275
276 276 def parents(self):
277 277 return [self._validate(p) for p in self._pl]
278 278
279 279 def p1(self):
280 280 return self._validate(self._pl[0])
281 281
282 282 def p2(self):
283 283 return self._validate(self._pl[1])
284 284
285 285 def branch(self):
286 286 return encoding.tolocal(self._branch)
287 287
288 288 def setparents(self, p1, p2=nullid):
289 289 """Set dirstate parents to p1 and p2.
290 290
291 291 When moving from two parents to one, 'm' merged entries are
292 292 adjusted to normal and previous copy records are discarded and
293 293 returned by the call.
294 294
295 295 See localrepo.setparents()
296 296 """
297 297 if self._parentwriters == 0:
298 298 raise ValueError("cannot set dirstate parent outside of "
299 299 "dirstate.parentchange context manager")
300 300
301 301 self._dirty = True
302 302 oldp2 = self._pl[1]
303 303 if self._origpl is None:
304 304 self._origpl = self._pl
305 305 self._map.setparents(p1, p2)
306 306 copies = {}
307 307 if oldp2 != nullid and p2 == nullid:
308 308 candidatefiles = self._map.nonnormalset.union(
309 309 self._map.otherparentset)
310 310 for f in candidatefiles:
311 311 s = self._map.get(f)
312 312 if s is None:
313 313 continue
314 314
315 315 # Discard 'm' markers when moving away from a merge state
316 316 if s[0] == 'm':
317 317 source = self._map.copymap.get(f)
318 318 if source:
319 319 copies[f] = source
320 320 self.normallookup(f)
321 321 # Also fix up otherparent markers
322 322 elif s[0] == 'n' and s[2] == -2:
323 323 source = self._map.copymap.get(f)
324 324 if source:
325 325 copies[f] = source
326 326 self.add(f)
327 327 return copies
328 328
329 329 def setbranch(self, branch):
330 330 self.__class__._branch.set(self, encoding.fromlocal(branch))
331 331 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
332 332 try:
333 333 f.write(self._branch + '\n')
334 334 f.close()
335 335
336 336 # make sure filecache has the correct stat info for _branch after
337 337 # replacing the underlying file
338 338 ce = self._filecache['_branch']
339 339 if ce:
340 340 ce.refresh()
341 341 except: # re-raises
342 342 f.discard()
343 343 raise
344 344
345 345 def invalidate(self):
346 346 '''Causes the next access to reread the dirstate.
347 347
348 348 This is different from localrepo.invalidatedirstate() because it always
349 349 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
350 350 check whether the dirstate has changed before rereading it.'''
351 351
352 352 for a in (r"_map", r"_branch", r"_ignore"):
353 353 if a in self.__dict__:
354 354 delattr(self, a)
355 355 self._lastnormaltime = 0
356 356 self._dirty = False
357 357 self._updatedfiles.clear()
358 358 self._parentwriters = 0
359 359 self._origpl = None
360 360
361 361 def copy(self, source, dest):
362 362 """Mark dest as a copy of source. Unmark dest if source is None."""
363 363 if source == dest:
364 364 return
365 365 self._dirty = True
366 366 if source is not None:
367 367 self._map.copymap[dest] = source
368 368 self._updatedfiles.add(source)
369 369 self._updatedfiles.add(dest)
370 370 elif self._map.copymap.pop(dest, None):
371 371 self._updatedfiles.add(dest)
372 372
373 373 def copied(self, file):
374 374 return self._map.copymap.get(file, None)
375 375
376 376 def copies(self):
377 377 return self._map.copymap
378 378
379 379 def _addpath(self, f, state, mode, size, mtime):
380 380 oldstate = self[f]
381 381 if state == 'a' or oldstate == 'r':
382 382 scmutil.checkfilename(f)
383 383 if self._map.hastrackeddir(f):
384 384 raise error.Abort(_('directory %r already in dirstate') %
385 385 pycompat.bytestr(f))
386 386 # shadows
387 387 for d in util.finddirs(f):
388 388 if self._map.hastrackeddir(d):
389 389 break
390 390 entry = self._map.get(d)
391 391 if entry is not None and entry[0] != 'r':
392 392 raise error.Abort(
393 393 _('file %r in dirstate clashes with %r') %
394 394 (pycompat.bytestr(d), pycompat.bytestr(f)))
395 395 self._dirty = True
396 396 self._updatedfiles.add(f)
397 397 self._map.addfile(f, oldstate, state, mode, size, mtime)
398 398
399 399 def normal(self, f):
400 400 '''Mark a file normal and clean.'''
401 401 s = os.lstat(self._join(f))
402 402 mtime = s[stat.ST_MTIME]
403 403 self._addpath(f, 'n', s.st_mode,
404 404 s.st_size & _rangemask, mtime & _rangemask)
405 405 self._map.copymap.pop(f, None)
406 406 if f in self._map.nonnormalset:
407 407 self._map.nonnormalset.remove(f)
408 408 if mtime > self._lastnormaltime:
409 409 # Remember the most recent modification timeslot for status(),
410 410 # to make sure we won't miss future size-preserving file content
411 411 # modifications that happen within the same timeslot.
412 412 self._lastnormaltime = mtime
413 413
414 414 def normallookup(self, f):
415 415 '''Mark a file normal, but possibly dirty.'''
416 416 if self._pl[1] != nullid:
417 417 # if there is a merge going on and the file was either
418 418 # in state 'm' (-1) or coming from other parent (-2) before
419 419 # being removed, restore that state.
420 420 entry = self._map.get(f)
421 421 if entry is not None:
422 422 if entry[0] == 'r' and entry[2] in (-1, -2):
423 423 source = self._map.copymap.get(f)
424 424 if entry[2] == -1:
425 425 self.merge(f)
426 426 elif entry[2] == -2:
427 427 self.otherparent(f)
428 428 if source:
429 429 self.copy(source, f)
430 430 return
431 431 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
432 432 return
433 433 self._addpath(f, 'n', 0, -1, -1)
434 434 self._map.copymap.pop(f, None)
435 435
436 436 def otherparent(self, f):
437 437 '''Mark as coming from the other parent, always dirty.'''
438 438 if self._pl[1] == nullid:
439 439 raise error.Abort(_("setting %r to other parent "
440 440 "only allowed in merges") % f)
441 441 if f in self and self[f] == 'n':
442 442 # merge-like
443 443 self._addpath(f, 'm', 0, -2, -1)
444 444 else:
445 445 # add-like
446 446 self._addpath(f, 'n', 0, -2, -1)
447 447 self._map.copymap.pop(f, None)
448 448
449 449 def add(self, f):
450 450 '''Mark a file added.'''
451 451 self._addpath(f, 'a', 0, -1, -1)
452 452 self._map.copymap.pop(f, None)
453 453
454 454 def remove(self, f):
455 455 '''Mark a file removed.'''
456 456 self._dirty = True
457 457 oldstate = self[f]
458 458 size = 0
459 459 if self._pl[1] != nullid:
460 460 entry = self._map.get(f)
461 461 if entry is not None:
462 462 # backup the previous state
463 463 if entry[0] == 'm': # merge
464 464 size = -1
465 465 elif entry[0] == 'n' and entry[2] == -2: # other parent
466 466 size = -2
467 467 self._map.otherparentset.add(f)
468 468 self._updatedfiles.add(f)
469 469 self._map.removefile(f, oldstate, size)
470 470 if size == 0:
471 471 self._map.copymap.pop(f, None)
472 472
473 473 def merge(self, f):
474 474 '''Mark a file merged.'''
475 475 if self._pl[1] == nullid:
476 476 return self.normallookup(f)
477 477 return self.otherparent(f)
478 478
479 479 def drop(self, f):
480 480 '''Drop a file from the dirstate'''
481 481 oldstate = self[f]
482 482 if self._map.dropfile(f, oldstate):
483 483 self._dirty = True
484 484 self._updatedfiles.add(f)
485 485 self._map.copymap.pop(f, None)
486 486
487 487 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
488 488 if exists is None:
489 489 exists = os.path.lexists(os.path.join(self._root, path))
490 490 if not exists:
491 491 # Maybe a path component exists
492 492 if not ignoremissing and '/' in path:
493 493 d, f = path.rsplit('/', 1)
494 494 d = self._normalize(d, False, ignoremissing, None)
495 495 folded = d + "/" + f
496 496 else:
497 497 # No path components, preserve original case
498 498 folded = path
499 499 else:
500 500 # recursively normalize leading directory components
501 501 # against dirstate
502 502 if '/' in normed:
503 503 d, f = normed.rsplit('/', 1)
504 504 d = self._normalize(d, False, ignoremissing, True)
505 505 r = self._root + "/" + d
506 506 folded = d + "/" + util.fspath(f, r)
507 507 else:
508 508 folded = util.fspath(normed, self._root)
509 509 storemap[normed] = folded
510 510
511 511 return folded
512 512
513 513 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
514 514 normed = util.normcase(path)
515 515 folded = self._map.filefoldmap.get(normed, None)
516 516 if folded is None:
517 517 if isknown:
518 518 folded = path
519 519 else:
520 520 folded = self._discoverpath(path, normed, ignoremissing, exists,
521 521 self._map.filefoldmap)
522 522 return folded
523 523
524 524 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
525 525 normed = util.normcase(path)
526 526 folded = self._map.filefoldmap.get(normed, None)
527 527 if folded is None:
528 528 folded = self._map.dirfoldmap.get(normed, None)
529 529 if folded is None:
530 530 if isknown:
531 531 folded = path
532 532 else:
533 533 # store discovered result in dirfoldmap so that future
534 534 # normalizefile calls don't start matching directories
535 535 folded = self._discoverpath(path, normed, ignoremissing, exists,
536 536 self._map.dirfoldmap)
537 537 return folded
538 538
539 539 def normalize(self, path, isknown=False, ignoremissing=False):
540 540 '''
541 541 normalize the case of a pathname when on a casefolding filesystem
542 542
543 543 isknown specifies whether the filename came from walking the
544 544 disk, to avoid extra filesystem access.
545 545
546 546 If ignoremissing is True, missing paths are returned
547 547 unchanged. Otherwise, we try harder to normalize possibly
548 548 existing path components.
549 549
550 550 The normalized case is determined based on the following precedence:
551 551
552 552 - version of name already stored in the dirstate
553 553 - version of name stored on disk
554 554 - version provided via command arguments
555 555 '''
556 556
557 557 if self._checkcase:
558 558 return self._normalize(path, isknown, ignoremissing)
559 559 return path
560 560
561 561 def clear(self):
562 562 self._map.clear()
563 563 self._lastnormaltime = 0
564 564 self._updatedfiles.clear()
565 565 self._dirty = True
566 566
567 567 def rebuild(self, parent, allfiles, changedfiles=None):
568 568 if changedfiles is None:
569 569 # Rebuild entire dirstate
570 570 changedfiles = allfiles
571 571 lastnormaltime = self._lastnormaltime
572 572 self.clear()
573 573 self._lastnormaltime = lastnormaltime
574 574
575 575 if self._origpl is None:
576 576 self._origpl = self._pl
577 577 self._map.setparents(parent, nullid)
578 578 for f in changedfiles:
579 579 if f in allfiles:
580 580 self.normallookup(f)
581 581 else:
582 582 self.drop(f)
583 583
584 584 self._dirty = True
585 585
586 586 def identity(self):
587 587 '''Return identity of dirstate itself to detect changing in storage
588 588
589 589 If identity of previous dirstate is equal to this, writing
590 590 changes based on the former dirstate out can keep consistency.
591 591 '''
592 592 return self._map.identity
593 593
594 594 def write(self, tr):
595 595 if not self._dirty:
596 596 return
597 597
598 598 filename = self._filename
599 599 if tr:
600 600 # 'dirstate.write()' is not only for writing in-memory
601 601 # changes out, but also for dropping ambiguous timestamps.
602 602 # Delayed writing would re-raise the "ambiguous timestamp issue".
603 603 # See also the wiki page below for details:
604 604 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
605 605
606 606 # emulate dropping timestamp in 'parsers.pack_dirstate'
607 607 now = _getfsnow(self._opener)
608 608 self._map.clearambiguoustimes(self._updatedfiles, now)
609 609
610 610 # emulate that all 'dirstate.normal' results are written out
611 611 self._lastnormaltime = 0
612 612 self._updatedfiles.clear()
613 613
614 614 # delay writing in-memory changes out
615 615 tr.addfilegenerator('dirstate', (self._filename,),
616 616 self._writedirstate, location='plain')
617 617 return
618 618
619 619 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
620 620 self._writedirstate(st)
621 621
622 622 def addparentchangecallback(self, category, callback):
623 623 """add a callback to be called when the wd parents are changed
624 624
625 625 Callback will be called with the following arguments:
626 626 dirstate, (oldp1, oldp2), (newp1, newp2)
627 627
628 628 Category is a unique identifier to allow overwriting an old callback
629 629 with a newer callback.
630 630 """
631 631 self._plchangecallbacks[category] = callback
632 632
633 633 def _writedirstate(self, st):
634 634 # notify callbacks about parents change
635 635 if self._origpl is not None and self._origpl != self._pl:
636 636 for c, callback in sorted(self._plchangecallbacks.iteritems()):
637 637 callback(self, self._origpl, self._pl)
638 638 self._origpl = None
639 639 # use the modification time of the newly created temporary file as the
640 640 # filesystem's notion of 'now'
641 641 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
642 642
643 643 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
644 644 # the timestamp of each entry in the dirstate, because of 'now > mtime'
645 645 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
646 646 if delaywrite > 0:
647 647 # do we have any files to delay for?
648 648 for f, e in self._map.iteritems():
649 649 if e[0] == 'n' and e[3] == now:
650 650 import time # to avoid useless import
651 651 # rather than sleep n seconds, sleep until the next
652 652 # multiple of n seconds
653 653 clock = time.time()
654 654 start = int(clock) - (int(clock) % delaywrite)
655 655 end = start + delaywrite
656 656 time.sleep(end - clock)
657 657 now = end # trust our estimate that the end is near now
658 658 break
659 659
660 660 self._map.write(st, now)
661 661 self._lastnormaltime = 0
662 662 self._dirty = False
663 663
664 664 def _dirignore(self, f):
665 665 if f == '.':
666 666 return False
667 667 if self._ignore(f):
668 668 return True
669 669 for p in util.finddirs(f):
670 670 if self._ignore(p):
671 671 return True
672 672 return False
673 673
674 674 def _ignorefiles(self):
675 675 files = []
676 676 if os.path.exists(self._join('.hgignore')):
677 677 files.append(self._join('.hgignore'))
678 678 for name, path in self._ui.configitems("ui"):
679 679 if name == 'ignore' or name.startswith('ignore.'):
680 680 # we need to use os.path.join here rather than self._join
681 681 # because path is arbitrary and user-specified
682 682 files.append(os.path.join(self._rootdir, util.expandpath(path)))
683 683 return files
684 684
685 685 def _ignorefileandline(self, f):
686 686 files = collections.deque(self._ignorefiles())
687 687 visited = set()
688 688 while files:
689 689 i = files.popleft()
690 690 patterns = matchmod.readpatternfile(i, self._ui.warn,
691 691 sourceinfo=True)
692 692 for pattern, lineno, line in patterns:
693 693 kind, p = matchmod._patsplit(pattern, 'glob')
694 694 if kind == "subinclude":
695 695 if p not in visited:
696 696 files.append(p)
697 697 continue
698 698 m = matchmod.match(self._root, '', [], [pattern],
699 699 warn=self._ui.warn)
700 700 if m(f):
701 701 return (i, lineno, line)
702 702 visited.add(i)
703 703 return (None, -1, "")
704 704
705 705 def _walkexplicit(self, match, subrepos):
706 706 '''Get stat data about the files explicitly specified by match.
707 707
708 708 Return a triple (results, dirsfound, dirsnotfound).
709 709 - results is a mapping from filename to stat result. It also contains
710 710 listings mapping subrepos and .hg to None.
711 711 - dirsfound is a list of files found to be directories.
712 712 - dirsnotfound is a list of files that the dirstate thinks are
713 713 directories and that were not found.'''
714 714
715 715 def badtype(mode):
716 716 kind = _('unknown')
717 717 if stat.S_ISCHR(mode):
718 718 kind = _('character device')
719 719 elif stat.S_ISBLK(mode):
720 720 kind = _('block device')
721 721 elif stat.S_ISFIFO(mode):
722 722 kind = _('fifo')
723 723 elif stat.S_ISSOCK(mode):
724 724 kind = _('socket')
725 725 elif stat.S_ISDIR(mode):
726 726 kind = _('directory')
727 727 return _('unsupported file type (type is %s)') % kind
728 728
729 729 matchedir = match.explicitdir
730 730 badfn = match.bad
731 731 dmap = self._map
732 732 lstat = os.lstat
733 733 getkind = stat.S_IFMT
734 734 dirkind = stat.S_IFDIR
735 735 regkind = stat.S_IFREG
736 736 lnkkind = stat.S_IFLNK
737 737 join = self._join
738 738 dirsfound = []
739 739 foundadd = dirsfound.append
740 740 dirsnotfound = []
741 741 notfoundadd = dirsnotfound.append
742 742
743 743 if not match.isexact() and self._checkcase:
744 744 normalize = self._normalize
745 745 else:
746 746 normalize = None
747 747
748 748 files = sorted(match.files())
749 749 subrepos.sort()
750 750 i, j = 0, 0
751 751 while i < len(files) and j < len(subrepos):
752 752 subpath = subrepos[j] + "/"
753 753 if files[i] < subpath:
754 754 i += 1
755 755 continue
756 756 while i < len(files) and files[i].startswith(subpath):
757 757 del files[i]
758 758 j += 1
759 759
760 760 if not files or '.' in files:
761 761 files = ['.']
762 # constructing the foldmap is expensive, so don't do it for the
763 # common case where files is ['.']
764 normalize = None
762 765 results = dict.fromkeys(subrepos)
763 766 results['.hg'] = None
764 767
765 768 for ff in files:
766 # constructing the foldmap is expensive, so don't do it for the
767 # common case where files is ['.']
768 if normalize and ff != '.':
769 if normalize:
769 770 nf = normalize(ff, False, True)
770 771 else:
771 772 nf = ff
772 773 if nf in results:
773 774 continue
774 775
775 776 try:
776 777 st = lstat(join(nf))
777 778 kind = getkind(st.st_mode)
778 779 if kind == dirkind:
779 780 if nf in dmap:
780 781 # file replaced by dir on disk but still in dirstate
781 782 results[nf] = None
782 783 if matchedir:
783 784 matchedir(nf)
784 785 foundadd((nf, ff))
785 786 elif kind == regkind or kind == lnkkind:
786 787 results[nf] = st
787 788 else:
788 789 badfn(ff, badtype(kind))
789 790 if nf in dmap:
790 791 results[nf] = None
791 792 except OSError as inst: # nf not found on disk - it is dirstate only
792 793 if nf in dmap: # does it exactly match a missing file?
793 794 results[nf] = None
794 795 else: # does it match a missing directory?
795 796 if self._map.hasdir(nf):
796 797 if matchedir:
797 798 matchedir(nf)
798 799 notfoundadd(nf)
799 800 else:
800 801 badfn(ff, encoding.strtolocal(inst.strerror))
801 802
802 803 # match.files() may contain explicitly-specified paths that shouldn't
803 804 # be taken; drop them from the list of files found. dirsfound/notfound
804 805 # aren't filtered here because they will be tested later.
805 806 if match.anypats():
806 807 for f in list(results):
807 808 if f == '.hg' or f in subrepos:
808 809 # keep sentinel to disable further out-of-repo walks
809 810 continue
810 811 if not match(f):
811 812 del results[f]
812 813
813 814 # Case insensitive filesystems cannot rely on lstat() failing to detect
814 815 # a case-only rename. Prune the stat object for any file that does not
815 816 # match the case in the filesystem, if there are multiple files that
816 817 # normalize to the same path.
817 818 if match.isexact() and self._checkcase:
818 819 normed = {}
819 820
820 821 for f, st in results.iteritems():
821 822 if st is None:
822 823 continue
823 824
824 825 nc = util.normcase(f)
825 826 paths = normed.get(nc)
826 827
827 828 if paths is None:
828 829 paths = set()
829 830 normed[nc] = paths
830 831
831 832 paths.add(f)
832 833
833 834 for norm, paths in normed.iteritems():
834 835 if len(paths) > 1:
835 836 for path in paths:
836 837 folded = self._discoverpath(path, norm, True, None,
837 838 self._map.dirfoldmap)
838 839 if path != folded:
839 840 results[path] = None
840 841
841 842 return results, dirsfound, dirsnotfound
842 843
843 844 def walk(self, match, subrepos, unknown, ignored, full=True):
844 845 '''
845 846 Walk recursively through the directory tree, finding all files
846 847 matched by match.
847 848
848 849 If full is False, maybe skip some known-clean files.
849 850
850 851 Return a dict mapping filename to stat-like object (either
851 852 mercurial.osutil.stat instance or return value of os.stat()).
852 853
853 854 '''
854 855 # full is a flag that extensions that hook into walk can use -- this
855 856 # implementation doesn't use it at all. This satisfies the contract
856 857 # because we only guarantee a "maybe".
857 858
858 859 if ignored:
859 860 ignore = util.never
860 861 dirignore = util.never
861 862 elif unknown:
862 863 ignore = self._ignore
863 864 dirignore = self._dirignore
864 865 else:
865 866 # if not unknown and not ignored, drop dir recursion and step 2
866 867 ignore = util.always
867 868 dirignore = util.always
868 869
869 870 matchfn = match.matchfn
870 871 matchalways = match.always()
871 872 matchtdir = match.traversedir
872 873 dmap = self._map
873 874 listdir = util.listdir
874 875 lstat = os.lstat
875 876 dirkind = stat.S_IFDIR
876 877 regkind = stat.S_IFREG
877 878 lnkkind = stat.S_IFLNK
878 879 join = self._join
879 880
880 881 exact = skipstep3 = False
881 882 if match.isexact(): # match.exact
882 883 exact = True
883 884 dirignore = util.always # skip step 2
884 885 elif match.prefix(): # match.match, no patterns
885 886 skipstep3 = True
886 887
887 888 if not exact and self._checkcase:
888 889 normalize = self._normalize
889 890 normalizefile = self._normalizefile
890 891 skipstep3 = False
891 892 else:
892 893 normalize = self._normalize
893 894 normalizefile = None
894 895
895 896 # step 1: find all explicit files
896 897 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
897 898
898 899 skipstep3 = skipstep3 and not (work or dirsnotfound)
899 900 work = [d for d in work if not dirignore(d[0])]
900 901
901 902 # step 2: visit subdirectories
902 903 def traverse(work, alreadynormed):
903 904 wadd = work.append
904 905 while work:
905 906 nd = work.pop()
906 907 visitentries = match.visitchildrenset(nd)
907 908 if not visitentries:
908 909 continue
909 910 if visitentries == 'this' or visitentries == 'all':
910 911 visitentries = None
911 912 skip = None
912 913 if nd == '.':
913 914 nd = ''
914 915 else:
915 916 skip = '.hg'
916 917 try:
917 918 entries = listdir(join(nd), stat=True, skip=skip)
918 919 except OSError as inst:
919 920 if inst.errno in (errno.EACCES, errno.ENOENT):
920 921 match.bad(self.pathto(nd),
921 922 encoding.strtolocal(inst.strerror))
922 923 continue
923 924 raise
924 925 for f, kind, st in entries:
925 926 # Some matchers may return files in the visitentries set,
926 927 # instead of 'this', if the matcher explicitly mentions them
927 928 # and is not an exactmatcher. This is acceptable; we do not
928 929 # make any hard assumptions about file-or-directory below
929 930 # based on the presence of `f` in visitentries. If
930 931 # visitchildrenset returned a set, we can always skip the
931 932 # entries *not* in the set it provided regardless of whether
932 933 # they're actually a file or a directory.
933 934 if visitentries and f not in visitentries:
934 935 continue
935 936 if normalizefile:
936 937 # even though f might be a directory, we're only
937 938 # interested in comparing it to files currently in the
938 939 # dmap -- therefore normalizefile is enough
939 940 nf = normalizefile(nd and (nd + "/" + f) or f, True,
940 941 True)
941 942 else:
942 943 nf = nd and (nd + "/" + f) or f
943 944 if nf not in results:
944 945 if kind == dirkind:
945 946 if not ignore(nf):
946 947 if matchtdir:
947 948 matchtdir(nf)
948 949 wadd(nf)
949 950 if nf in dmap and (matchalways or matchfn(nf)):
950 951 results[nf] = None
951 952 elif kind == regkind or kind == lnkkind:
952 953 if nf in dmap:
953 954 if matchalways or matchfn(nf):
954 955 results[nf] = st
955 956 elif ((matchalways or matchfn(nf))
956 957 and not ignore(nf)):
957 958 # unknown file -- normalize if necessary
958 959 if not alreadynormed:
959 960 nf = normalize(nf, False, True)
960 961 results[nf] = st
961 962 elif nf in dmap and (matchalways or matchfn(nf)):
962 963 results[nf] = None
963 964
964 965 for nd, d in work:
965 966 # alreadynormed means that processwork doesn't have to do any
966 967 # expensive directory normalization
967 968 alreadynormed = not normalize or nd == d
968 969 traverse([d], alreadynormed)
969 970
970 971 for s in subrepos:
971 972 del results[s]
972 973 del results['.hg']
973 974
974 975 # step 3: visit remaining files from dmap
975 976 if not skipstep3 and not exact:
976 977 # If a dmap file is not in results yet, it was either
977 978 # a) not matching matchfn b) ignored, c) missing, or d) under a
978 979 # symlink directory.
979 980 if not results and matchalways:
980 981 visit = [f for f in dmap]
981 982 else:
982 983 visit = [f for f in dmap if f not in results and matchfn(f)]
983 984 visit.sort()
984 985
985 986 if unknown:
986 987 # unknown == True means we walked all dirs under the roots
987 988 # that weren't ignored, and everything that matched was stat'ed
988 989 # and is already in results.
989 990 # The rest must thus be ignored or under a symlink.
990 991 audit_path = pathutil.pathauditor(self._root, cached=True)
991 992
992 993 for nf in iter(visit):
993 994 # If a stat for the same file was already added with a
994 995 # different case, don't add one for this, since that would
995 996 # make it appear as if the file exists under both names
996 997 # on disk.
997 998 if (normalizefile and
998 999 normalizefile(nf, True, True) in results):
999 1000 results[nf] = None
1000 1001 # Report ignored items in the dmap as long as they are not
1001 1002 # under a symlink directory.
1002 1003 elif audit_path.check(nf):
1003 1004 try:
1004 1005 results[nf] = lstat(join(nf))
1005 1006 # file was just ignored, no links, and exists
1006 1007 except OSError:
1007 1008 # file doesn't exist
1008 1009 results[nf] = None
1009 1010 else:
1010 1011 # It's either missing or under a symlink directory
1011 1012 # which we in this case report as missing
1012 1013 results[nf] = None
1013 1014 else:
1014 1015 # We may not have walked the full directory tree above,
1015 1016 # so stat and check everything we missed.
1016 1017 iv = iter(visit)
1017 1018 for st in util.statfiles([join(i) for i in visit]):
1018 1019 results[next(iv)] = st
1019 1020 return results
1020 1021
1021 1022 def status(self, match, subrepos, ignored, clean, unknown):
1022 1023 '''Determine the status of the working copy relative to the
1023 1024 dirstate and return a pair of (unsure, status), where status is of type
1024 1025 scmutil.status and:
1025 1026
1026 1027 unsure:
1027 1028 files that might have been modified since the dirstate was
1028 1029 written, but need to be read to be sure (size is the same
1029 1030 but mtime differs)
1030 1031 status.modified:
1031 1032 files that have definitely been modified since the dirstate
1032 1033 was written (different size or mode)
1033 1034 status.clean:
1034 1035 files that have definitely not been modified since the
1035 1036 dirstate was written
1036 1037 '''
1037 1038 listignored, listclean, listunknown = ignored, clean, unknown
1038 1039 lookup, modified, added, unknown, ignored = [], [], [], [], []
1039 1040 removed, deleted, clean = [], [], []
1040 1041
1041 1042 dmap = self._map
1042 1043 dmap.preload()
1043 1044 dcontains = dmap.__contains__
1044 1045 dget = dmap.__getitem__
1045 1046 ladd = lookup.append # aka "unsure"
1046 1047 madd = modified.append
1047 1048 aadd = added.append
1048 1049 uadd = unknown.append
1049 1050 iadd = ignored.append
1050 1051 radd = removed.append
1051 1052 dadd = deleted.append
1052 1053 cadd = clean.append
1053 1054 mexact = match.exact
1054 1055 dirignore = self._dirignore
1055 1056 checkexec = self._checkexec
1056 1057 copymap = self._map.copymap
1057 1058 lastnormaltime = self._lastnormaltime
1058 1059
1059 1060 # We need to do full walks when either
1060 1061 # - we're listing all clean files, or
1061 1062 # - match.traversedir does something, because match.traversedir should
1062 1063 # be called for every dir in the working dir
1063 1064 full = listclean or match.traversedir is not None
1064 1065 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1065 1066 full=full).iteritems():
1066 1067 if not dcontains(fn):
1067 1068 if (listignored or mexact(fn)) and dirignore(fn):
1068 1069 if listignored:
1069 1070 iadd(fn)
1070 1071 else:
1071 1072 uadd(fn)
1072 1073 continue
1073 1074
1074 1075 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1075 1076 # written like that for performance reasons. dmap[fn] is not a
1076 1077 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1077 1078 # opcode has fast paths when the value to be unpacked is a tuple or
1078 1079 # a list, but falls back to creating a full-fledged iterator in
1079 1080 # general. That is much slower than simply accessing and storing the
1080 1081 # tuple members one by one.
1081 1082 t = dget(fn)
1082 1083 state = t[0]
1083 1084 mode = t[1]
1084 1085 size = t[2]
1085 1086 time = t[3]
1086 1087
1087 1088 if not st and state in "nma":
1088 1089 dadd(fn)
1089 1090 elif state == 'n':
1090 1091 if (size >= 0 and
1091 1092 ((size != st.st_size and size != st.st_size & _rangemask)
1092 1093 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1093 1094 or size == -2 # other parent
1094 1095 or fn in copymap):
1095 1096 madd(fn)
1096 1097 elif (time != st[stat.ST_MTIME]
1097 1098 and time != st[stat.ST_MTIME] & _rangemask):
1098 1099 ladd(fn)
1099 1100 elif st[stat.ST_MTIME] == lastnormaltime:
1100 1101 # fn may have just been marked as normal and it may have
1101 1102 # changed in the same second without changing its size.
1102 1103 # This can happen if we quickly do multiple commits.
1103 1104 # Force lookup, so we don't miss such a racy file change.
1104 1105 ladd(fn)
1105 1106 elif listclean:
1106 1107 cadd(fn)
1107 1108 elif state == 'm':
1108 1109 madd(fn)
1109 1110 elif state == 'a':
1110 1111 aadd(fn)
1111 1112 elif state == 'r':
1112 1113 radd(fn)
1113 1114
1114 1115 return (lookup, scmutil.status(modified, added, removed, deleted,
1115 1116 unknown, ignored, clean))
1116 1117
1117 1118 def matches(self, match):
1118 1119 '''
1119 1120 return files in the dirstate (in whatever state) filtered by match
1120 1121 '''
1121 1122 dmap = self._map
1122 1123 if match.always():
1123 1124 return dmap.keys()
1124 1125 files = match.files()
1125 1126 if match.isexact():
1126 1127 # fast path -- filter the other way around, since typically files is
1127 1128 # much smaller than dmap
1128 1129 return [f for f in files if f in dmap]
1129 1130 if match.prefix() and all(fn in dmap for fn in files):
1130 1131 # fast path -- all the values are known to be files, so just return
1131 1132 # that
1132 1133 return list(files)
1133 1134 return [f for f in dmap if match(f)]
1134 1135
1135 1136 def _actualfilename(self, tr):
1136 1137 if tr:
1137 1138 return self._pendingfilename
1138 1139 else:
1139 1140 return self._filename
1140 1141
1141 1142 def savebackup(self, tr, backupname):
1142 1143 '''Save current dirstate into backup file'''
1143 1144 filename = self._actualfilename(tr)
1144 1145 assert backupname != filename
1145 1146
1146 1147 # use '_writedirstate' instead of 'write' to write changes certainly,
1147 1148 # because the latter omits writing out if transaction is running.
1148 1149 # output file will be used to create backup of dirstate at this point.
1149 1150 if self._dirty or not self._opener.exists(filename):
1150 1151 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1151 1152 checkambig=True))
1152 1153
1153 1154 if tr:
1154 1155 # ensure that subsequent tr.writepending returns True for
1155 1156 # changes written out above, even if dirstate is never
1156 1157 # changed after this
1157 1158 tr.addfilegenerator('dirstate', (self._filename,),
1158 1159 self._writedirstate, location='plain')
1159 1160
1160 1161 # ensure that pending file written above is unlinked at
1161 1162 # failure, even if tr.writepending isn't invoked until the
1162 1163 # end of this transaction
1163 1164 tr.registertmp(filename, location='plain')
1164 1165
1165 1166 self._opener.tryunlink(backupname)
1166 1167 # hardlink backup is okay because _writedirstate is always called
1167 1168 # with an "atomictemp=True" file.
1168 1169 util.copyfile(self._opener.join(filename),
1169 1170 self._opener.join(backupname), hardlink=True)
1170 1171
1171 1172 def restorebackup(self, tr, backupname):
1172 1173 '''Restore dirstate by backup file'''
1173 1174 # this "invalidate()" prevents "wlock.release()" from writing
1174 1175 # changes of dirstate out after restoring from backup file
1175 1176 self.invalidate()
1176 1177 filename = self._actualfilename(tr)
1177 1178 o = self._opener
1178 1179 if util.samefile(o.join(backupname), o.join(filename)):
1179 1180 o.unlink(backupname)
1180 1181 else:
1181 1182 o.rename(backupname, filename, checkambig=True)
1182 1183
1183 1184 def clearbackup(self, tr, backupname):
1184 1185 '''Clear backup file'''
1185 1186 self._opener.unlink(backupname)
1186 1187
1187 1188 class dirstatemap(object):
1188 1189 """Map encapsulating the dirstate's contents.
1189 1190
1190 1191 The dirstate contains the following state:
1191 1192
1192 1193 - `identity` is the identity of the dirstate file, which can be used to
1193 1194 detect when changes have occurred to the dirstate file.
1194 1195
1195 1196 - `parents` is a pair containing the parents of the working copy. The
1196 1197 parents are updated by calling `setparents`.
1197 1198
1198 1199 - the state map maps filenames to tuples of (state, mode, size, mtime),
1199 1200 where state is a single character representing 'normal', 'added',
1200 1201 'removed', or 'merged'. It is read by treating the dirstate as a
1201 1202 dict. File state is updated by calling the `addfile`, `removefile` and
1202 1203 `dropfile` methods.
1203 1204
1204 1205 - `copymap` maps destination filenames to their source filename.
1205 1206
1206 1207 The dirstate also provides the following views onto the state:
1207 1208
1208 1209 - `nonnormalset` is a set of the filenames that have state other
1209 1210 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1210 1211
1211 1212 - `otherparentset` is a set of the filenames that are marked as coming
1212 1213 from the second parent when the dirstate is currently being merged.
1213 1214
1214 1215 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1215 1216 form that they appear as in the dirstate.
1216 1217
1217 1218 - `dirfoldmap` is a dict mapping normalized directory names to the
1218 1219 denormalized form that they appear as in the dirstate.
1219 1220 """
1220 1221
1221 1222 def __init__(self, ui, opener, root):
1222 1223 self._ui = ui
1223 1224 self._opener = opener
1224 1225 self._root = root
1225 1226 self._filename = 'dirstate'
1226 1227
1227 1228 self._parents = None
1228 1229 self._dirtyparents = False
1229 1230
1230 1231 # for consistent view between _pl() and _read() invocations
1231 1232 self._pendingmode = None
1232 1233
1233 1234 @propertycache
1234 1235 def _map(self):
1235 1236 self._map = {}
1236 1237 self.read()
1237 1238 return self._map
1238 1239
1239 1240 @propertycache
1240 1241 def copymap(self):
1241 1242 self.copymap = {}
1242 1243 self._map
1243 1244 return self.copymap
1244 1245
1245 1246 def clear(self):
1246 1247 self._map.clear()
1247 1248 self.copymap.clear()
1248 1249 self.setparents(nullid, nullid)
1249 1250 util.clearcachedproperty(self, "_dirs")
1250 1251 util.clearcachedproperty(self, "_alldirs")
1251 1252 util.clearcachedproperty(self, "filefoldmap")
1252 1253 util.clearcachedproperty(self, "dirfoldmap")
1253 1254 util.clearcachedproperty(self, "nonnormalset")
1254 1255 util.clearcachedproperty(self, "otherparentset")
1255 1256
1256 1257 def items(self):
1257 1258 return self._map.iteritems()
1258 1259
1259 1260 # forward for python2,3 compat
1260 1261 iteritems = items
1261 1262
1262 1263 def __len__(self):
1263 1264 return len(self._map)
1264 1265
1265 1266 def __iter__(self):
1266 1267 return iter(self._map)
1267 1268
1268 1269 def get(self, key, default=None):
1269 1270 return self._map.get(key, default)
1270 1271
1271 1272 def __contains__(self, key):
1272 1273 return key in self._map
1273 1274
1274 1275 def __getitem__(self, key):
1275 1276 return self._map[key]
1276 1277
1277 1278 def keys(self):
1278 1279 return self._map.keys()
1279 1280
1280 1281 def preload(self):
1281 1282 """Loads the underlying data, if it's not already loaded"""
1282 1283 self._map
1283 1284
1284 1285 def addfile(self, f, oldstate, state, mode, size, mtime):
1285 1286 """Add a tracked file to the dirstate."""
1286 1287 if oldstate in "?r" and r"_dirs" in self.__dict__:
1287 1288 self._dirs.addpath(f)
1288 1289 if oldstate == "?" and r"_alldirs" in self.__dict__:
1289 1290 self._alldirs.addpath(f)
1290 1291 self._map[f] = dirstatetuple(state, mode, size, mtime)
1291 1292 if state != 'n' or mtime == -1:
1292 1293 self.nonnormalset.add(f)
1293 1294 if size == -2:
1294 1295 self.otherparentset.add(f)
1295 1296
1296 1297 def removefile(self, f, oldstate, size):
1297 1298 """
1298 1299 Mark a file as removed in the dirstate.
1299 1300
1300 1301 The `size` parameter is used to store sentinel values that indicate
1301 1302 the file's previous state. In the future, we should refactor this
1302 1303 to be more explicit about what that state is.
1303 1304 """
1304 1305 if oldstate not in "?r" and r"_dirs" in self.__dict__:
1305 1306 self._dirs.delpath(f)
1306 1307 if oldstate == "?" and r"_alldirs" in self.__dict__:
1307 1308 self._alldirs.addpath(f)
1308 1309 if r"filefoldmap" in self.__dict__:
1309 1310 normed = util.normcase(f)
1310 1311 self.filefoldmap.pop(normed, None)
1311 1312 self._map[f] = dirstatetuple('r', 0, size, 0)
1312 1313 self.nonnormalset.add(f)
1313 1314
1314 1315 def dropfile(self, f, oldstate):
1315 1316 """
1316 1317 Remove a file from the dirstate. Returns True if the file was
1317 1318 previously recorded.
1318 1319 """
1319 1320 exists = self._map.pop(f, None) is not None
1320 1321 if exists:
1321 1322 if oldstate != "r" and r"_dirs" in self.__dict__:
1322 1323 self._dirs.delpath(f)
1323 1324 if r"_alldirs" in self.__dict__:
1324 1325 self._alldirs.delpath(f)
1325 1326 if r"filefoldmap" in self.__dict__:
1326 1327 normed = util.normcase(f)
1327 1328 self.filefoldmap.pop(normed, None)
1328 1329 self.nonnormalset.discard(f)
1329 1330 return exists
1330 1331
1331 1332 def clearambiguoustimes(self, files, now):
1332 1333 for f in files:
1333 1334 e = self.get(f)
1334 1335 if e is not None and e[0] == 'n' and e[3] == now:
1335 1336 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1336 1337 self.nonnormalset.add(f)
1337 1338
1338 1339 def nonnormalentries(self):
1339 1340 '''Compute the nonnormal dirstate entries from the dmap'''
1340 1341 try:
1341 1342 return parsers.nonnormalotherparententries(self._map)
1342 1343 except AttributeError:
1343 1344 nonnorm = set()
1344 1345 otherparent = set()
1345 1346 for fname, e in self._map.iteritems():
1346 1347 if e[0] != 'n' or e[3] == -1:
1347 1348 nonnorm.add(fname)
1348 1349 if e[0] == 'n' and e[2] == -2:
1349 1350 otherparent.add(fname)
1350 1351 return nonnorm, otherparent
1351 1352
1352 1353 @propertycache
1353 1354 def filefoldmap(self):
1354 1355 """Returns a dictionary mapping normalized case paths to their
1355 1356 non-normalized versions.
1356 1357 """
1357 1358 try:
1358 1359 makefilefoldmap = parsers.make_file_foldmap
1359 1360 except AttributeError:
1360 1361 pass
1361 1362 else:
1362 1363 return makefilefoldmap(self._map, util.normcasespec,
1363 1364 util.normcasefallback)
1364 1365
1365 1366 f = {}
1366 1367 normcase = util.normcase
1367 1368 for name, s in self._map.iteritems():
1368 1369 if s[0] != 'r':
1369 1370 f[normcase(name)] = name
1370 1371 f['.'] = '.' # prevents useless util.fspath() invocation
1371 1372 return f
1372 1373
1373 1374 def hastrackeddir(self, d):
1374 1375 """
1375 1376 Returns True if the dirstate contains a tracked (not removed) file
1376 1377 in this directory.
1377 1378 """
1378 1379 return d in self._dirs
1379 1380
1380 1381 def hasdir(self, d):
1381 1382 """
1382 1383 Returns True if the dirstate contains a file (tracked or removed)
1383 1384 in this directory.
1384 1385 """
1385 1386 return d in self._alldirs
1386 1387
1387 1388 @propertycache
1388 1389 def _dirs(self):
1389 1390 return util.dirs(self._map, 'r')
1390 1391
1391 1392 @propertycache
1392 1393 def _alldirs(self):
1393 1394 return util.dirs(self._map)
1394 1395
1395 1396 def _opendirstatefile(self):
1396 1397 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1397 1398 if self._pendingmode is not None and self._pendingmode != mode:
1398 1399 fp.close()
1399 1400 raise error.Abort(_('working directory state may be '
1400 1401 'changed parallelly'))
1401 1402 self._pendingmode = mode
1402 1403 return fp
1403 1404
1404 1405 def parents(self):
1405 1406 if not self._parents:
1406 1407 try:
1407 1408 fp = self._opendirstatefile()
1408 1409 st = fp.read(40)
1409 1410 fp.close()
1410 1411 except IOError as err:
1411 1412 if err.errno != errno.ENOENT:
1412 1413 raise
1413 1414 # File doesn't exist, so the current state is empty
1414 1415 st = ''
1415 1416
1416 1417 l = len(st)
1417 1418 if l == 40:
1418 1419 self._parents = (st[:20], st[20:40])
1419 1420 elif l == 0:
1420 1421 self._parents = (nullid, nullid)
1421 1422 else:
1422 1423 raise error.Abort(_('working directory state appears '
1423 1424 'damaged!'))
1424 1425
1425 1426 return self._parents
1426 1427
1427 1428 def setparents(self, p1, p2):
1428 1429 self._parents = (p1, p2)
1429 1430 self._dirtyparents = True
1430 1431
1431 1432 def read(self):
1432 1433 # ignore HG_PENDING because identity is used only for writing
1433 1434 self.identity = util.filestat.frompath(
1434 1435 self._opener.join(self._filename))
1435 1436
1436 1437 try:
1437 1438 fp = self._opendirstatefile()
1438 1439 try:
1439 1440 st = fp.read()
1440 1441 finally:
1441 1442 fp.close()
1442 1443 except IOError as err:
1443 1444 if err.errno != errno.ENOENT:
1444 1445 raise
1445 1446 return
1446 1447 if not st:
1447 1448 return
1448 1449
1449 1450 if util.safehasattr(parsers, 'dict_new_presized'):
1450 1451 # Make an estimate of the number of files in the dirstate based on
1451 1452 # its size. From a linear regression on a set of real-world repos,
1452 1453 # all over 10,000 files, the size of a dirstate entry is 85
1453 1454 # bytes. The cost of resizing is significantly higher than the cost
1454 1455 # of filling in a larger presized dict, so subtract 20% from the
1455 1456 # size.
1456 1457 #
1457 1458 # This heuristic is imperfect in many ways, so in a future dirstate
1458 1459 # format update it makes sense to just record the number of entries
1459 1460 # on write.
1460 1461 self._map = parsers.dict_new_presized(len(st) // 71)
1461 1462
1462 1463 # Python's garbage collector triggers a GC each time a certain number
1463 1464 # of container objects (the number being defined by
1464 1465 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1465 1466 # for each file in the dirstate. The C version then immediately marks
1466 1467 # them as not to be tracked by the collector. However, this has no
1467 1468 # effect on when GCs are triggered, only on what objects the GC looks
1468 1469 # into. This means that O(number of files) GCs are unavoidable.
1469 1470 # Depending on when in the process's lifetime the dirstate is parsed,
1470 1471 # this can get very expensive. As a workaround, disable GC while
1471 1472 # parsing the dirstate.
1472 1473 #
1473 1474 # (we cannot decorate the function directly since it is in a C module)
1474 1475 if rustext is not None:
1475 1476 parse_dirstate = rustext.dirstate.parse_dirstate
1476 1477 else:
1477 1478 parse_dirstate = parsers.parse_dirstate
1478 1479
1479 1480 parse_dirstate = util.nogc(parse_dirstate)
1480 1481 p = parse_dirstate(self._map, self.copymap, st)
1481 1482 if not self._dirtyparents:
1482 1483 self.setparents(*p)
1483 1484
1484 1485 # Avoid excess attribute lookups by fast pathing certain checks
1485 1486 self.__contains__ = self._map.__contains__
1486 1487 self.__getitem__ = self._map.__getitem__
1487 1488 self.get = self._map.get
1488 1489
1489 1490 def write(self, st, now):
1490 1491 if rustext is not None:
1491 1492 pack_dirstate = rustext.dirstate.pack_dirstate
1492 1493 else:
1493 1494 pack_dirstate = parsers.pack_dirstate
1494 1495
1495 1496 st.write(pack_dirstate(self._map, self.copymap,
1496 1497 self.parents(), now))
1497 1498 st.close()
1498 1499 self._dirtyparents = False
1499 1500 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1500 1501
1501 1502 @propertycache
1502 1503 def nonnormalset(self):
1503 1504 nonnorm, otherparents = self.nonnormalentries()
1504 1505 self.otherparentset = otherparents
1505 1506 return nonnorm
1506 1507
1507 1508 @propertycache
1508 1509 def otherparentset(self):
1509 1510 nonnorm, otherparents = self.nonnormalentries()
1510 1511 self.nonnormalset = nonnorm
1511 1512 return otherparents
1512 1513
1513 1514 @propertycache
1514 1515 def identity(self):
1515 1516 self._map
1516 1517 return self.identity
1517 1518
1518 1519 @propertycache
1519 1520 def dirfoldmap(self):
1520 1521 f = {}
1521 1522 normcase = util.normcase
1522 1523 for name in self._dirs:
1523 1524 f[normcase(name)] = name
1524 1525 return f
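
For context on why the "constructing the foldmap is expensive" comment added in this changeset is worth a fast path: on case-insensitive filesystems the dirstate resolves requested paths through a map keyed by their case-normalized form (see _normalize() and the filefoldmap property in the listing above). The toy version below, which assumes plain str.lower() as the normalization in place of util.normcase, shows the lookup pattern; building the map requires touching every tracked file, which is exactly what the files == ['.'] shortcut avoids.

# Rough, self-contained sketch of the foldmap lookup idea (not the real code,
# which builds the map from the dirstate contents and has a C fast path).
def build_foldmap(tracked_files):
    # normalized (case-folded) name -> the spelling stored in the dirstate
    return {name.lower(): name for name in tracked_files}

def normalize(path, foldmap):
    # return the dirstate's spelling of `path` if it differs only by case
    return foldmap.get(path.lower(), path)

tracked = ['README.txt', 'src/Main.py']
foldmap = build_foldmap(tracked)   # built once; this is the expensive step
assert normalize('readme.TXT', foldmap) == 'README.txt'
assert normalize('new-file', foldmap) == 'new-file'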