dirstate: remove obsolete reference to dirstate.beginparentchange...
Martin von Zweigbergk
r42117:42dd6998 default
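
For context: the ValueError message changed below now points callers at the parentchange() context manager defined in this module, since the dirstate.beginparentchange() API it used to name is obsolete. A minimal sketch of the intended calling pattern (the repository object and node names here are illustrative placeholders, not part of this change):

    with repo.wlock():
        with repo.dirstate.parentchange():
            repo.dirstate.setparents(newp1)      # p2 defaults to nullid
            repo.dirstate.normal('some/file')    # adjust file states as needed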
@@ -1,1508 +1,1508 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd)[stat.ST_MTIME]
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
71 71 # UNC path pointing to root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._lastnormaltime = 0
75 75 self._ui = ui
76 76 self._filecache = {}
77 77 self._parentwriters = 0
78 78 self._filename = 'dirstate'
79 79 self._pendingfilename = '%s.pending' % self._filename
80 80 self._plchangecallbacks = {}
81 81 self._origpl = None
82 82 self._updatedfiles = set()
83 83 self._mapcls = dirstatemap
84 84 # Access and cache cwd early, so we don't access it for the first time
85 85 # after a working-copy update caused it to not exist (accessing it then
86 86 # raises an exception).
87 87 self._cwd
88 88
89 89 @contextlib.contextmanager
90 90 def parentchange(self):
91 91 '''Context manager for handling dirstate parents.
92 92
93 93 If an exception occurs in the scope of the context manager,
94 94 the incoherent dirstate won't be written when wlock is
95 95 released.
96 96 '''
97 97 self._parentwriters += 1
98 98 yield
99 99 # Typically we want the "undo" step of a context manager in a
100 100 # finally block so it happens even when an exception
101 101 # occurs. In this case, however, we only want to decrement
102 102 # parentwriters if the code in the with statement exits
103 103 # normally, so we don't have a try/finally here on purpose.
104 104 self._parentwriters -= 1
105 105
106 106 def pendingparentchange(self):
107 107 '''Returns true if the dirstate is in the middle of a set of changes
108 108 that modify the dirstate parent.
109 109 '''
110 110 return self._parentwriters > 0
111 111
112 112 @propertycache
113 113 def _map(self):
114 114 """Return the dirstate contents (see documentation for dirstatemap)."""
115 115 self._map = self._mapcls(self._ui, self._opener, self._root)
116 116 return self._map
117 117
118 118 @property
119 119 def _sparsematcher(self):
120 120 """The matcher for the sparse checkout.
121 121
122 122 The working directory may not include every file from a manifest. The
123 123 matcher obtained by this property will match a path if it is to be
124 124 included in the working directory.
125 125 """
126 126 # TODO there is potential to cache this property. For now, the matcher
127 127 # is resolved on every access. (But the called function does use a
128 128 # cache to keep the lookup fast.)
129 129 return self._sparsematchfn()
130 130
131 131 @repocache('branch')
132 132 def _branch(self):
133 133 try:
134 134 return self._opener.read("branch").strip() or "default"
135 135 except IOError as inst:
136 136 if inst.errno != errno.ENOENT:
137 137 raise
138 138 return "default"
139 139
140 140 @property
141 141 def _pl(self):
142 142 return self._map.parents()
143 143
144 144 def hasdir(self, d):
145 145 return self._map.hastrackeddir(d)
146 146
147 147 @rootcache('.hgignore')
148 148 def _ignore(self):
149 149 files = self._ignorefiles()
150 150 if not files:
151 151 return matchmod.never()
152 152
153 153 pats = ['include:%s' % f for f in files]
154 154 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
155 155
156 156 @propertycache
157 157 def _slash(self):
158 158 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
159 159
160 160 @propertycache
161 161 def _checklink(self):
162 162 return util.checklink(self._root)
163 163
164 164 @propertycache
165 165 def _checkexec(self):
166 166 return util.checkexec(self._root)
167 167
168 168 @propertycache
169 169 def _checkcase(self):
170 170 return not util.fscasesensitive(self._join('.hg'))
171 171
172 172 def _join(self, f):
173 173 # much faster than os.path.join()
174 174 # it's safe because f is always a relative path
175 175 return self._rootdir + f
176 176
177 177 def flagfunc(self, buildfallback):
178 178 if self._checklink and self._checkexec:
179 179 def f(x):
180 180 try:
181 181 st = os.lstat(self._join(x))
182 182 if util.statislink(st):
183 183 return 'l'
184 184 if util.statisexec(st):
185 185 return 'x'
186 186 except OSError:
187 187 pass
188 188 return ''
189 189 return f
190 190
191 191 fallback = buildfallback()
192 192 if self._checklink:
193 193 def f(x):
194 194 if os.path.islink(self._join(x)):
195 195 return 'l'
196 196 if 'x' in fallback(x):
197 197 return 'x'
198 198 return ''
199 199 return f
200 200 if self._checkexec:
201 201 def f(x):
202 202 if 'l' in fallback(x):
203 203 return 'l'
204 204 if util.isexec(self._join(x)):
205 205 return 'x'
206 206 return ''
207 207 return f
208 208 else:
209 209 return fallback
210 210
211 211 @propertycache
212 212 def _cwd(self):
213 213 # internal config: ui.forcecwd
214 214 forcecwd = self._ui.config('ui', 'forcecwd')
215 215 if forcecwd:
216 216 return forcecwd
217 217 return encoding.getcwd()
218 218
219 219 def getcwd(self):
220 220 '''Return the path from which a canonical path is calculated.
221 221
222 222 This path should be used to resolve file patterns or to convert
223 223 canonical paths back to file paths for display. It shouldn't be
224 224 used to get real file paths. Use vfs functions instead.
225 225 '''
226 226 cwd = self._cwd
227 227 if cwd == self._root:
228 228 return ''
229 229 # self._root ends with a path separator if self._root is '/' or 'C:\'
230 230 rootsep = self._root
231 231 if not util.endswithsep(rootsep):
232 232 rootsep += pycompat.ossep
233 233 if cwd.startswith(rootsep):
234 234 return cwd[len(rootsep):]
235 235 else:
236 236 # we're outside the repo. return an absolute path.
237 237 return cwd
238 238
239 239 def pathto(self, f, cwd=None):
240 240 if cwd is None:
241 241 cwd = self.getcwd()
242 242 path = util.pathto(self._root, cwd, f)
243 243 if self._slash:
244 244 return util.pconvert(path)
245 245 return path
246 246
247 247 def __getitem__(self, key):
248 248 '''Return the current state of key (a filename) in the dirstate.
249 249
250 250 States are:
251 251 n normal
252 252 m needs merging
253 253 r marked for removal
254 254 a marked for addition
255 255 ? not tracked
256 256 '''
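# Illustrative lookups (hypothetical filenames): a tracked, clean file
# yields 'n', a freshly added one 'a', and an unknown path '?', e.g.
# self['README'] == 'n' while self['not/tracked'] == '?'.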
257 257 return self._map.get(key, ("?",))[0]
258 258
259 259 def __contains__(self, key):
260 260 return key in self._map
261 261
262 262 def __iter__(self):
263 263 return iter(sorted(self._map))
264 264
265 265 def items(self):
266 266 return self._map.iteritems()
267 267
268 268 iteritems = items
269 269
270 270 def parents(self):
271 271 return [self._validate(p) for p in self._pl]
272 272
273 273 def p1(self):
274 274 return self._validate(self._pl[0])
275 275
276 276 def p2(self):
277 277 return self._validate(self._pl[1])
278 278
279 279 def branch(self):
280 280 return encoding.tolocal(self._branch)
281 281
282 282 def setparents(self, p1, p2=nullid):
283 283 """Set dirstate parents to p1 and p2.
284 284
285 285 When moving from two parents to one, 'm' merged entries are
286 286 adjusted to normal and previous copy records discarded and
287 287 returned by the call.
288 288
289 289 See localrepo.setparents()
290 290 """
291 291 if self._parentwriters == 0:
292 - raise ValueError("cannot set dirstate parent without "
293 - "calling dirstate.beginparentchange")
292 + raise ValueError("cannot set dirstate parent outside of "
293 + "dirstate.parentchange context manager")
294 294
295 295 self._dirty = True
296 296 oldp2 = self._pl[1]
297 297 if self._origpl is None:
298 298 self._origpl = self._pl
299 299 self._map.setparents(p1, p2)
300 300 copies = {}
301 301 if oldp2 != nullid and p2 == nullid:
302 302 candidatefiles = self._map.nonnormalset.union(
303 303 self._map.otherparentset)
304 304 for f in candidatefiles:
305 305 s = self._map.get(f)
306 306 if s is None:
307 307 continue
308 308
309 309 # Discard 'm' markers when moving away from a merge state
310 310 if s[0] == 'm':
311 311 source = self._map.copymap.get(f)
312 312 if source:
313 313 copies[f] = source
314 314 self.normallookup(f)
315 315 # Also fix up otherparent markers
316 316 elif s[0] == 'n' and s[2] == -2:
317 317 source = self._map.copymap.get(f)
318 318 if source:
319 319 copies[f] = source
320 320 self.add(f)
321 321 return copies
322 322
323 323 def setbranch(self, branch):
324 324 self.__class__._branch.set(self, encoding.fromlocal(branch))
325 325 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
326 326 try:
327 327 f.write(self._branch + '\n')
328 328 f.close()
329 329
330 330 # make sure filecache has the correct stat info for _branch after
331 331 # replacing the underlying file
332 332 ce = self._filecache['_branch']
333 333 if ce:
334 334 ce.refresh()
335 335 except: # re-raises
336 336 f.discard()
337 337 raise
338 338
339 339 def invalidate(self):
340 340 '''Causes the next access to reread the dirstate.
341 341
342 342 This is different from localrepo.invalidatedirstate() because it always
343 343 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
344 344 check whether the dirstate has changed before rereading it.'''
345 345
346 346 for a in (r"_map", r"_branch", r"_ignore"):
347 347 if a in self.__dict__:
348 348 delattr(self, a)
349 349 self._lastnormaltime = 0
350 350 self._dirty = False
351 351 self._updatedfiles.clear()
352 352 self._parentwriters = 0
353 353 self._origpl = None
354 354
355 355 def copy(self, source, dest):
356 356 """Mark dest as a copy of source. Unmark dest if source is None."""
357 357 if source == dest:
358 358 return
359 359 self._dirty = True
360 360 if source is not None:
361 361 self._map.copymap[dest] = source
362 362 self._updatedfiles.add(source)
363 363 self._updatedfiles.add(dest)
364 364 elif self._map.copymap.pop(dest, None):
365 365 self._updatedfiles.add(dest)
366 366
367 367 def copied(self, file):
368 368 return self._map.copymap.get(file, None)
369 369
370 370 def copies(self):
371 371 return self._map.copymap
372 372
373 373 def _addpath(self, f, state, mode, size, mtime):
374 374 oldstate = self[f]
375 375 if state == 'a' or oldstate == 'r':
376 376 scmutil.checkfilename(f)
377 377 if self._map.hastrackeddir(f):
378 378 raise error.Abort(_('directory %r already in dirstate') %
379 379 pycompat.bytestr(f))
380 380 # shadows
381 381 for d in util.finddirs(f):
382 382 if self._map.hastrackeddir(d):
383 383 break
384 384 entry = self._map.get(d)
385 385 if entry is not None and entry[0] != 'r':
386 386 raise error.Abort(
387 387 _('file %r in dirstate clashes with %r') %
388 388 (pycompat.bytestr(d), pycompat.bytestr(f)))
389 389 self._dirty = True
390 390 self._updatedfiles.add(f)
391 391 self._map.addfile(f, oldstate, state, mode, size, mtime)
392 392
393 393 def normal(self, f):
394 394 '''Mark a file normal and clean.'''
395 395 s = os.lstat(self._join(f))
396 396 mtime = s[stat.ST_MTIME]
397 397 self._addpath(f, 'n', s.st_mode,
398 398 s.st_size & _rangemask, mtime & _rangemask)
399 399 self._map.copymap.pop(f, None)
400 400 if f in self._map.nonnormalset:
401 401 self._map.nonnormalset.remove(f)
402 402 if mtime > self._lastnormaltime:
403 403 # Remember the most recent modification timeslot for status(),
404 404 # to make sure we won't miss future size-preserving file content
405 405 # modifications that happen within the same timeslot.
406 406 self._lastnormaltime = mtime
407 407
408 408 def normallookup(self, f):
409 409 '''Mark a file normal, but possibly dirty.'''
410 410 if self._pl[1] != nullid:
411 411 # if there is a merge going on and the file was either
412 412 # in state 'm' (-1) or coming from other parent (-2) before
413 413 # being removed, restore that state.
414 414 entry = self._map.get(f)
415 415 if entry is not None:
416 416 if entry[0] == 'r' and entry[2] in (-1, -2):
417 417 source = self._map.copymap.get(f)
418 418 if entry[2] == -1:
419 419 self.merge(f)
420 420 elif entry[2] == -2:
421 421 self.otherparent(f)
422 422 if source:
423 423 self.copy(source, f)
424 424 return
425 425 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
426 426 return
427 427 self._addpath(f, 'n', 0, -1, -1)
428 428 self._map.copymap.pop(f, None)
429 429
430 430 def otherparent(self, f):
431 431 '''Mark as coming from the other parent, always dirty.'''
432 432 if self._pl[1] == nullid:
433 433 raise error.Abort(_("setting %r to other parent "
434 434 "only allowed in merges") % f)
435 435 if f in self and self[f] == 'n':
436 436 # merge-like
437 437 self._addpath(f, 'm', 0, -2, -1)
438 438 else:
439 439 # add-like
440 440 self._addpath(f, 'n', 0, -2, -1)
441 441 self._map.copymap.pop(f, None)
442 442
443 443 def add(self, f):
444 444 '''Mark a file added.'''
445 445 self._addpath(f, 'a', 0, -1, -1)
446 446 self._map.copymap.pop(f, None)
447 447
448 448 def remove(self, f):
449 449 '''Mark a file removed.'''
450 450 self._dirty = True
451 451 oldstate = self[f]
452 452 size = 0
453 453 if self._pl[1] != nullid:
454 454 entry = self._map.get(f)
455 455 if entry is not None:
456 456 # backup the previous state
457 457 if entry[0] == 'm': # merge
458 458 size = -1
459 459 elif entry[0] == 'n' and entry[2] == -2: # other parent
460 460 size = -2
461 461 self._map.otherparentset.add(f)
462 462 self._updatedfiles.add(f)
463 463 self._map.removefile(f, oldstate, size)
464 464 if size == 0:
465 465 self._map.copymap.pop(f, None)
466 466
467 467 def merge(self, f):
468 468 '''Mark a file merged.'''
469 469 if self._pl[1] == nullid:
470 470 return self.normallookup(f)
471 471 return self.otherparent(f)
472 472
473 473 def drop(self, f):
474 474 '''Drop a file from the dirstate'''
475 475 oldstate = self[f]
476 476 if self._map.dropfile(f, oldstate):
477 477 self._dirty = True
478 478 self._updatedfiles.add(f)
479 479 self._map.copymap.pop(f, None)
480 480
481 481 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
482 482 if exists is None:
483 483 exists = os.path.lexists(os.path.join(self._root, path))
484 484 if not exists:
485 485 # Maybe a path component exists
486 486 if not ignoremissing and '/' in path:
487 487 d, f = path.rsplit('/', 1)
488 488 d = self._normalize(d, False, ignoremissing, None)
489 489 folded = d + "/" + f
490 490 else:
491 491 # No path components, preserve original case
492 492 folded = path
493 493 else:
494 494 # recursively normalize leading directory components
495 495 # against dirstate
496 496 if '/' in normed:
497 497 d, f = normed.rsplit('/', 1)
498 498 d = self._normalize(d, False, ignoremissing, True)
499 499 r = self._root + "/" + d
500 500 folded = d + "/" + util.fspath(f, r)
501 501 else:
502 502 folded = util.fspath(normed, self._root)
503 503 storemap[normed] = folded
504 504
505 505 return folded
506 506
507 507 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
508 508 normed = util.normcase(path)
509 509 folded = self._map.filefoldmap.get(normed, None)
510 510 if folded is None:
511 511 if isknown:
512 512 folded = path
513 513 else:
514 514 folded = self._discoverpath(path, normed, ignoremissing, exists,
515 515 self._map.filefoldmap)
516 516 return folded
517 517
518 518 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
519 519 normed = util.normcase(path)
520 520 folded = self._map.filefoldmap.get(normed, None)
521 521 if folded is None:
522 522 folded = self._map.dirfoldmap.get(normed, None)
523 523 if folded is None:
524 524 if isknown:
525 525 folded = path
526 526 else:
527 527 # store discovered result in dirfoldmap so that future
528 528 # normalizefile calls don't start matching directories
529 529 folded = self._discoverpath(path, normed, ignoremissing, exists,
530 530 self._map.dirfoldmap)
531 531 return folded
532 532
533 533 def normalize(self, path, isknown=False, ignoremissing=False):
534 534 '''
535 535 normalize the case of a pathname when on a casefolding filesystem
536 536
537 537 isknown specifies whether the filename came from walking the
538 538 disk, to avoid extra filesystem access.
539 539
540 540 If ignoremissing is True, missing paths are returned
541 541 unchanged. Otherwise, we try harder to normalize possibly
542 542 existing path components.
543 543
544 544 The normalized case is determined based on the following precedence:
545 545
546 546 - version of name already stored in the dirstate
547 547 - version of name stored on disk
548 548 - version provided via command arguments
549 549 '''
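# For example (hypothetical paths), on a case-insensitive filesystem
# normalize('FOO/bar.TXT') returns the spelling already recorded in the
# dirstate (say 'foo/bar.txt'), falling back to the on-disk spelling and
# finally to the argument itself.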
550 550
551 551 if self._checkcase:
552 552 return self._normalize(path, isknown, ignoremissing)
553 553 return path
554 554
555 555 def clear(self):
556 556 self._map.clear()
557 557 self._lastnormaltime = 0
558 558 self._updatedfiles.clear()
559 559 self._dirty = True
560 560
561 561 def rebuild(self, parent, allfiles, changedfiles=None):
562 562 if changedfiles is None:
563 563 # Rebuild entire dirstate
564 564 changedfiles = allfiles
565 565 lastnormaltime = self._lastnormaltime
566 566 self.clear()
567 567 self._lastnormaltime = lastnormaltime
568 568
569 569 if self._origpl is None:
570 570 self._origpl = self._pl
571 571 self._map.setparents(parent, nullid)
572 572 for f in changedfiles:
573 573 if f in allfiles:
574 574 self.normallookup(f)
575 575 else:
576 576 self.drop(f)
577 577
578 578 self._dirty = True
579 579
580 580 def identity(self):
581 581 '''Return identity of dirstate itself to detect changing in storage
582 582
583 583 If identity of previous dirstate is equal to this, writing
584 584 changes based on the former dirstate out can keep consistency.
585 585 '''
586 586 return self._map.identity
587 587
588 588 def write(self, tr):
589 589 if not self._dirty:
590 590 return
591 591
592 592 filename = self._filename
593 593 if tr:
594 594 # 'dirstate.write()' is not only for writing in-memory
595 595 # changes out, but also for dropping ambiguous timestamps;
596 596 # delaying the write would re-introduce the "ambiguous timestamp issue".
597 597 # See also the wiki page below for detail:
598 598 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
599 599
600 600 # emulate dropping timestamp in 'parsers.pack_dirstate'
601 601 now = _getfsnow(self._opener)
602 602 self._map.clearambiguoustimes(self._updatedfiles, now)
603 603
604 604 # emulate that all 'dirstate.normal' results are written out
605 605 self._lastnormaltime = 0
606 606 self._updatedfiles.clear()
607 607
608 608 # delay writing in-memory changes out
609 609 tr.addfilegenerator('dirstate', (self._filename,),
610 610 self._writedirstate, location='plain')
611 611 return
612 612
613 613 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
614 614 self._writedirstate(st)
615 615
616 616 def addparentchangecallback(self, category, callback):
617 617 """add a callback to be called when the wd parents are changed
618 618
619 619 Callback will be called with the following arguments:
620 620 dirstate, (oldp1, oldp2), (newp1, newp2)
621 621
622 622 Category is a unique identifier to allow overwriting an old callback
623 623 with a newer callback.
624 624 """
625 625 self._plchangecallbacks[category] = callback
626 626
627 627 def _writedirstate(self, st):
628 628 # notify callbacks about parents change
629 629 if self._origpl is not None and self._origpl != self._pl:
630 630 for c, callback in sorted(self._plchangecallbacks.iteritems()):
631 631 callback(self, self._origpl, self._pl)
632 632 self._origpl = None
633 633 # use the modification time of the newly created temporary file as the
634 634 # filesystem's notion of 'now'
635 635 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
636 636
637 637 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
638 638 # the timestamp of each entry in the dirstate, because of 'now > mtime'
639 639 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
640 640 if delaywrite > 0:
641 641 # do we have any files to delay for?
642 642 for f, e in self._map.iteritems():
643 643 if e[0] == 'n' and e[3] == now:
644 644 import time # to avoid useless import
645 645 # rather than sleep n seconds, sleep until the next
646 646 # multiple of n seconds
647 647 clock = time.time()
648 648 start = int(clock) - (int(clock) % delaywrite)
649 649 end = start + delaywrite
650 650 time.sleep(end - clock)
651 651 now = end # trust our estimate that the end is near now
652 652 break
653 653
654 654 self._map.write(st, now)
655 655 self._lastnormaltime = 0
656 656 self._dirty = False
657 657
658 658 def _dirignore(self, f):
659 659 if f == '.':
660 660 return False
661 661 if self._ignore(f):
662 662 return True
663 663 for p in util.finddirs(f):
664 664 if self._ignore(p):
665 665 return True
666 666 return False
667 667
668 668 def _ignorefiles(self):
669 669 files = []
670 670 if os.path.exists(self._join('.hgignore')):
671 671 files.append(self._join('.hgignore'))
672 672 for name, path in self._ui.configitems("ui"):
673 673 if name == 'ignore' or name.startswith('ignore.'):
674 674 # we need to use os.path.join here rather than self._join
675 675 # because path is arbitrary and user-specified
676 676 files.append(os.path.join(self._rootdir, util.expandpath(path)))
677 677 return files
678 678
679 679 def _ignorefileandline(self, f):
680 680 files = collections.deque(self._ignorefiles())
681 681 visited = set()
682 682 while files:
683 683 i = files.popleft()
684 684 patterns = matchmod.readpatternfile(i, self._ui.warn,
685 685 sourceinfo=True)
686 686 for pattern, lineno, line in patterns:
687 687 kind, p = matchmod._patsplit(pattern, 'glob')
688 688 if kind == "subinclude":
689 689 if p not in visited:
690 690 files.append(p)
691 691 continue
692 692 m = matchmod.match(self._root, '', [], [pattern],
693 693 warn=self._ui.warn)
694 694 if m(f):
695 695 return (i, lineno, line)
696 696 visited.add(i)
697 697 return (None, -1, "")
698 698
699 699 def _walkexplicit(self, match, subrepos):
700 700 '''Get stat data about the files explicitly specified by match.
701 701
702 702 Return a triple (results, dirsfound, dirsnotfound).
703 703 - results is a mapping from filename to stat result. It also contains
704 704 listings mapping subrepos and .hg to None.
705 705 - dirsfound is a list of files found to be directories.
706 706 - dirsnotfound is a list of files that the dirstate thinks are
707 707 directories and that were not found.'''
708 708
709 709 def badtype(mode):
710 710 kind = _('unknown')
711 711 if stat.S_ISCHR(mode):
712 712 kind = _('character device')
713 713 elif stat.S_ISBLK(mode):
714 714 kind = _('block device')
715 715 elif stat.S_ISFIFO(mode):
716 716 kind = _('fifo')
717 717 elif stat.S_ISSOCK(mode):
718 718 kind = _('socket')
719 719 elif stat.S_ISDIR(mode):
720 720 kind = _('directory')
721 721 return _('unsupported file type (type is %s)') % kind
722 722
723 723 matchedir = match.explicitdir
724 724 badfn = match.bad
725 725 dmap = self._map
726 726 lstat = os.lstat
727 727 getkind = stat.S_IFMT
728 728 dirkind = stat.S_IFDIR
729 729 regkind = stat.S_IFREG
730 730 lnkkind = stat.S_IFLNK
731 731 join = self._join
732 732 dirsfound = []
733 733 foundadd = dirsfound.append
734 734 dirsnotfound = []
735 735 notfoundadd = dirsnotfound.append
736 736
737 737 if not match.isexact() and self._checkcase:
738 738 normalize = self._normalize
739 739 else:
740 740 normalize = None
741 741
742 742 files = sorted(match.files())
743 743 subrepos.sort()
744 744 i, j = 0, 0
745 745 while i < len(files) and j < len(subrepos):
746 746 subpath = subrepos[j] + "/"
747 747 if files[i] < subpath:
748 748 i += 1
749 749 continue
750 750 while i < len(files) and files[i].startswith(subpath):
751 751 del files[i]
752 752 j += 1
753 753
754 754 if not files or '.' in files:
755 755 files = ['.']
756 756 results = dict.fromkeys(subrepos)
757 757 results['.hg'] = None
758 758
759 759 for ff in files:
760 760 # constructing the foldmap is expensive, so don't do it for the
761 761 # common case where files is ['.']
762 762 if normalize and ff != '.':
763 763 nf = normalize(ff, False, True)
764 764 else:
765 765 nf = ff
766 766 if nf in results:
767 767 continue
768 768
769 769 try:
770 770 st = lstat(join(nf))
771 771 kind = getkind(st.st_mode)
772 772 if kind == dirkind:
773 773 if nf in dmap:
774 774 # file replaced by dir on disk but still in dirstate
775 775 results[nf] = None
776 776 if matchedir:
777 777 matchedir(nf)
778 778 foundadd((nf, ff))
779 779 elif kind == regkind or kind == lnkkind:
780 780 results[nf] = st
781 781 else:
782 782 badfn(ff, badtype(kind))
783 783 if nf in dmap:
784 784 results[nf] = None
785 785 except OSError as inst: # nf not found on disk - it is dirstate only
786 786 if nf in dmap: # does it exactly match a missing file?
787 787 results[nf] = None
788 788 else: # does it match a missing directory?
789 789 if self._map.hasdir(nf):
790 790 if matchedir:
791 791 matchedir(nf)
792 792 notfoundadd(nf)
793 793 else:
794 794 badfn(ff, encoding.strtolocal(inst.strerror))
795 795
796 796 # match.files() may contain explicitly-specified paths that shouldn't
797 797 # be taken; drop them from the list of files found. dirsfound/notfound
798 798 # aren't filtered here because they will be tested later.
799 799 if match.anypats():
800 800 for f in list(results):
801 801 if f == '.hg' or f in subrepos:
802 802 # keep sentinel to disable further out-of-repo walks
803 803 continue
804 804 if not match(f):
805 805 del results[f]
806 806
807 807 # Case insensitive filesystems cannot rely on lstat() failing to detect
808 808 # a case-only rename. Prune the stat object for any file that does not
809 809 # match the case in the filesystem, if there are multiple files that
810 810 # normalize to the same path.
811 811 if match.isexact() and self._checkcase:
812 812 normed = {}
813 813
814 814 for f, st in results.iteritems():
815 815 if st is None:
816 816 continue
817 817
818 818 nc = util.normcase(f)
819 819 paths = normed.get(nc)
820 820
821 821 if paths is None:
822 822 paths = set()
823 823 normed[nc] = paths
824 824
825 825 paths.add(f)
826 826
827 827 for norm, paths in normed.iteritems():
828 828 if len(paths) > 1:
829 829 for path in paths:
830 830 folded = self._discoverpath(path, norm, True, None,
831 831 self._map.dirfoldmap)
832 832 if path != folded:
833 833 results[path] = None
834 834
835 835 return results, dirsfound, dirsnotfound
836 836
837 837 def walk(self, match, subrepos, unknown, ignored, full=True):
838 838 '''
839 839 Walk recursively through the directory tree, finding all files
840 840 matched by match.
841 841
842 842 If full is False, maybe skip some known-clean files.
843 843
844 844 Return a dict mapping filename to stat-like object (either
845 845 mercurial.osutil.stat instance or return value of os.stat()).
846 846
847 847 '''
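# Note that some values are None rather than a stat result: paths that
# are in the dirstate but missing on disk, or that were replaced by a
# directory or an unsupported file type (see _walkexplicit above).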
848 848 # full is a flag that extensions that hook into walk can use -- this
849 849 # implementation doesn't use it at all. This satisfies the contract
850 850 # because we only guarantee a "maybe".
851 851
852 852 if ignored:
853 853 ignore = util.never
854 854 dirignore = util.never
855 855 elif unknown:
856 856 ignore = self._ignore
857 857 dirignore = self._dirignore
858 858 else:
859 859 # if not unknown and not ignored, drop dir recursion and step 2
860 860 ignore = util.always
861 861 dirignore = util.always
862 862
863 863 matchfn = match.matchfn
864 864 matchalways = match.always()
865 865 matchtdir = match.traversedir
866 866 dmap = self._map
867 867 listdir = util.listdir
868 868 lstat = os.lstat
869 869 dirkind = stat.S_IFDIR
870 870 regkind = stat.S_IFREG
871 871 lnkkind = stat.S_IFLNK
872 872 join = self._join
873 873
874 874 exact = skipstep3 = False
875 875 if match.isexact(): # match.exact
876 876 exact = True
877 877 dirignore = util.always # skip step 2
878 878 elif match.prefix(): # match.match, no patterns
879 879 skipstep3 = True
880 880
881 881 if not exact and self._checkcase:
882 882 normalize = self._normalize
883 883 normalizefile = self._normalizefile
884 884 skipstep3 = False
885 885 else:
886 886 normalize = self._normalize
887 887 normalizefile = None
888 888
889 889 # step 1: find all explicit files
890 890 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
891 891
892 892 skipstep3 = skipstep3 and not (work or dirsnotfound)
893 893 work = [d for d in work if not dirignore(d[0])]
894 894
895 895 # step 2: visit subdirectories
896 896 def traverse(work, alreadynormed):
897 897 wadd = work.append
898 898 while work:
899 899 nd = work.pop()
900 900 visitentries = match.visitchildrenset(nd)
901 901 if not visitentries:
902 902 continue
903 903 if visitentries == 'this' or visitentries == 'all':
904 904 visitentries = None
905 905 skip = None
906 906 if nd == '.':
907 907 nd = ''
908 908 else:
909 909 skip = '.hg'
910 910 try:
911 911 entries = listdir(join(nd), stat=True, skip=skip)
912 912 except OSError as inst:
913 913 if inst.errno in (errno.EACCES, errno.ENOENT):
914 914 match.bad(self.pathto(nd),
915 915 encoding.strtolocal(inst.strerror))
916 916 continue
917 917 raise
918 918 for f, kind, st in entries:
919 919 # Some matchers may return files in the visitentries set,
920 920 # instead of 'this', if the matcher explicitly mentions them
921 921 # and is not an exactmatcher. This is acceptable; we do not
922 922 # make any hard assumptions about file-or-directory below
923 923 # based on the presence of `f` in visitentries. If
924 924 # visitchildrenset returned a set, we can always skip the
925 925 # entries *not* in the set it provided regardless of whether
926 926 # they're actually a file or a directory.
927 927 if visitentries and f not in visitentries:
928 928 continue
929 929 if normalizefile:
930 930 # even though f might be a directory, we're only
931 931 # interested in comparing it to files currently in the
932 932 # dmap -- therefore normalizefile is enough
933 933 nf = normalizefile(nd and (nd + "/" + f) or f, True,
934 934 True)
935 935 else:
936 936 nf = nd and (nd + "/" + f) or f
937 937 if nf not in results:
938 938 if kind == dirkind:
939 939 if not ignore(nf):
940 940 if matchtdir:
941 941 matchtdir(nf)
942 942 wadd(nf)
943 943 if nf in dmap and (matchalways or matchfn(nf)):
944 944 results[nf] = None
945 945 elif kind == regkind or kind == lnkkind:
946 946 if nf in dmap:
947 947 if matchalways or matchfn(nf):
948 948 results[nf] = st
949 949 elif ((matchalways or matchfn(nf))
950 950 and not ignore(nf)):
951 951 # unknown file -- normalize if necessary
952 952 if not alreadynormed:
953 953 nf = normalize(nf, False, True)
954 954 results[nf] = st
955 955 elif nf in dmap and (matchalways or matchfn(nf)):
956 956 results[nf] = None
957 957
958 958 for nd, d in work:
959 959 # alreadynormed means that processwork doesn't have to do any
960 960 # expensive directory normalization
961 961 alreadynormed = not normalize or nd == d
962 962 traverse([d], alreadynormed)
963 963
964 964 for s in subrepos:
965 965 del results[s]
966 966 del results['.hg']
967 967
968 968 # step 3: visit remaining files from dmap
969 969 if not skipstep3 and not exact:
970 970 # If a dmap file is not in results yet, it was either
971 971 # a) not matching matchfn b) ignored, c) missing, or d) under a
972 972 # symlink directory.
973 973 if not results and matchalways:
974 974 visit = [f for f in dmap]
975 975 else:
976 976 visit = [f for f in dmap if f not in results and matchfn(f)]
977 977 visit.sort()
978 978
979 979 if unknown:
980 980 # unknown == True means we walked all dirs under the roots
981 981 # that weren't ignored, and everything that matched was stat'ed
982 982 # and is already in results.
983 983 # The rest must thus be ignored or under a symlink.
984 984 audit_path = pathutil.pathauditor(self._root, cached=True)
985 985
986 986 for nf in iter(visit):
987 987 # If a stat for the same file was already added with a
988 988 # different case, don't add one for this, since that would
989 989 # make it appear as if the file exists under both names
990 990 # on disk.
991 991 if (normalizefile and
992 992 normalizefile(nf, True, True) in results):
993 993 results[nf] = None
994 994 # Report ignored items in the dmap as long as they are not
995 995 # under a symlink directory.
996 996 elif audit_path.check(nf):
997 997 try:
998 998 results[nf] = lstat(join(nf))
999 999 # file was just ignored, no links, and exists
1000 1000 except OSError:
1001 1001 # file doesn't exist
1002 1002 results[nf] = None
1003 1003 else:
1004 1004 # It's either missing or under a symlink directory
1005 1005 # which we in this case report as missing
1006 1006 results[nf] = None
1007 1007 else:
1008 1008 # We may not have walked the full directory tree above,
1009 1009 # so stat and check everything we missed.
1010 1010 iv = iter(visit)
1011 1011 for st in util.statfiles([join(i) for i in visit]):
1012 1012 results[next(iv)] = st
1013 1013 return results
1014 1014
1015 1015 def status(self, match, subrepos, ignored, clean, unknown):
1016 1016 '''Determine the status of the working copy relative to the
1017 1017 dirstate and return a pair of (unsure, status), where status is of type
1018 1018 scmutil.status and:
1019 1019
1020 1020 unsure:
1021 1021 files that might have been modified since the dirstate was
1022 1022 written, but need to be read to be sure (size is the same
1023 1023 but mtime differs)
1024 1024 status.modified:
1025 1025 files that have definitely been modified since the dirstate
1026 1026 was written (different size or mode)
1027 1027 status.clean:
1028 1028 files that have definitely not been modified since the
1029 1029 dirstate was written
1030 1030 '''
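# Shape of the return value (see the final return below):
#   (lookup, scmutil.status(modified, added, removed, deleted,
#                           unknown, ignored, clean))
# where every element is a plain list of filenames and 'lookup' holds
# the "unsure" files described above.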
1031 1031 listignored, listclean, listunknown = ignored, clean, unknown
1032 1032 lookup, modified, added, unknown, ignored = [], [], [], [], []
1033 1033 removed, deleted, clean = [], [], []
1034 1034
1035 1035 dmap = self._map
1036 1036 dmap.preload()
1037 1037 dcontains = dmap.__contains__
1038 1038 dget = dmap.__getitem__
1039 1039 ladd = lookup.append # aka "unsure"
1040 1040 madd = modified.append
1041 1041 aadd = added.append
1042 1042 uadd = unknown.append
1043 1043 iadd = ignored.append
1044 1044 radd = removed.append
1045 1045 dadd = deleted.append
1046 1046 cadd = clean.append
1047 1047 mexact = match.exact
1048 1048 dirignore = self._dirignore
1049 1049 checkexec = self._checkexec
1050 1050 copymap = self._map.copymap
1051 1051 lastnormaltime = self._lastnormaltime
1052 1052
1053 1053 # We need to do full walks when either
1054 1054 # - we're listing all clean files, or
1055 1055 # - match.traversedir does something, because match.traversedir should
1056 1056 # be called for every dir in the working dir
1057 1057 full = listclean or match.traversedir is not None
1058 1058 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1059 1059 full=full).iteritems():
1060 1060 if not dcontains(fn):
1061 1061 if (listignored or mexact(fn)) and dirignore(fn):
1062 1062 if listignored:
1063 1063 iadd(fn)
1064 1064 else:
1065 1065 uadd(fn)
1066 1066 continue
1067 1067
1068 1068 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1069 1069 # written like that for performance reasons. dmap[fn] is not a
1070 1070 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1071 1071 # opcode has fast paths when the value to be unpacked is a tuple or
1072 1072 # a list, but falls back to creating a full-fledged iterator in
1073 1073 # general. That is much slower than simply accessing and storing the
1074 1074 # tuple members one by one.
1075 1075 t = dget(fn)
1076 1076 state = t[0]
1077 1077 mode = t[1]
1078 1078 size = t[2]
1079 1079 time = t[3]
1080 1080
1081 1081 if not st and state in "nma":
1082 1082 dadd(fn)
1083 1083 elif state == 'n':
1084 1084 if (size >= 0 and
1085 1085 ((size != st.st_size and size != st.st_size & _rangemask)
1086 1086 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1087 1087 or size == -2 # other parent
1088 1088 or fn in copymap):
1089 1089 madd(fn)
1090 1090 elif (time != st[stat.ST_MTIME]
1091 1091 and time != st[stat.ST_MTIME] & _rangemask):
1092 1092 ladd(fn)
1093 1093 elif st[stat.ST_MTIME] == lastnormaltime:
1094 1094 # fn may have just been marked as normal and it may have
1095 1095 # changed in the same second without changing its size.
1096 1096 # This can happen if we quickly do multiple commits.
1097 1097 # Force lookup, so we don't miss such a racy file change.
1098 1098 ladd(fn)
1099 1099 elif listclean:
1100 1100 cadd(fn)
1101 1101 elif state == 'm':
1102 1102 madd(fn)
1103 1103 elif state == 'a':
1104 1104 aadd(fn)
1105 1105 elif state == 'r':
1106 1106 radd(fn)
1107 1107
1108 1108 return (lookup, scmutil.status(modified, added, removed, deleted,
1109 1109 unknown, ignored, clean))
1110 1110
1111 1111 def matches(self, match):
1112 1112 '''
1113 1113 return files in the dirstate (in whatever state) filtered by match
1114 1114 '''
1115 1115 dmap = self._map
1116 1116 if match.always():
1117 1117 return dmap.keys()
1118 1118 files = match.files()
1119 1119 if match.isexact():
1120 1120 # fast path -- filter the other way around, since typically files is
1121 1121 # much smaller than dmap
1122 1122 return [f for f in files if f in dmap]
1123 1123 if match.prefix() and all(fn in dmap for fn in files):
1124 1124 # fast path -- all the values are known to be files, so just return
1125 1125 # that
1126 1126 return list(files)
1127 1127 return [f for f in dmap if match(f)]
1128 1128
1129 1129 def _actualfilename(self, tr):
1130 1130 if tr:
1131 1131 return self._pendingfilename
1132 1132 else:
1133 1133 return self._filename
1134 1134
1135 1135 def savebackup(self, tr, backupname):
1136 1136 '''Save current dirstate into backup file'''
1137 1137 filename = self._actualfilename(tr)
1138 1138 assert backupname != filename
1139 1139
1140 1140 # use '_writedirstate' instead of 'write' to make sure changes are written out,
1141 1141 # because the latter skips writing while a transaction is running.
1142 1142 # the output file will be used to create a backup of the dirstate at this point.
1143 1143 if self._dirty or not self._opener.exists(filename):
1144 1144 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1145 1145 checkambig=True))
1146 1146
1147 1147 if tr:
1148 1148 # ensure that subsequent tr.writepending returns True for
1149 1149 # changes written out above, even if dirstate is never
1150 1150 # changed after this
1151 1151 tr.addfilegenerator('dirstate', (self._filename,),
1152 1152 self._writedirstate, location='plain')
1153 1153
1154 1154 # ensure that pending file written above is unlinked at
1155 1155 # failure, even if tr.writepending isn't invoked until the
1156 1156 # end of this transaction
1157 1157 tr.registertmp(filename, location='plain')
1158 1158
1159 1159 self._opener.tryunlink(backupname)
1160 1160 # hardlink backup is okay because _writedirstate is always called
1161 1161 # with an "atomictemp=True" file.
1162 1162 util.copyfile(self._opener.join(filename),
1163 1163 self._opener.join(backupname), hardlink=True)
1164 1164
1165 1165 def restorebackup(self, tr, backupname):
1166 1166 '''Restore dirstate by backup file'''
1167 1167 # this "invalidate()" prevents "wlock.release()" from writing
1168 1168 # changes of dirstate out after restoring from backup file
1169 1169 self.invalidate()
1170 1170 filename = self._actualfilename(tr)
1171 1171 o = self._opener
1172 1172 if util.samefile(o.join(backupname), o.join(filename)):
1173 1173 o.unlink(backupname)
1174 1174 else:
1175 1175 o.rename(backupname, filename, checkambig=True)
1176 1176
1177 1177 def clearbackup(self, tr, backupname):
1178 1178 '''Clear backup file'''
1179 1179 self._opener.unlink(backupname)
1180 1180
1181 1181 class dirstatemap(object):
1182 1182 """Map encapsulating the dirstate's contents.
1183 1183
1184 1184 The dirstate contains the following state:
1185 1185
1186 1186 - `identity` is the identity of the dirstate file, which can be used to
1187 1187 detect when changes have occurred to the dirstate file.
1188 1188
1189 1189 - `parents` is a pair containing the parents of the working copy. The
1190 1190 parents are updated by calling `setparents`.
1191 1191
1192 1192 - the state map maps filenames to tuples of (state, mode, size, mtime),
1193 1193 where state is a single character representing 'normal', 'added',
1194 1194 'removed', or 'merged'. It is read by treating the dirstate as a
1195 1195 dict. File state is updated by calling the `addfile`, `removefile` and
1196 1196 `dropfile` methods.
1197 1197
1198 1198 - `copymap` maps destination filenames to their source filename.
1199 1199
1200 1200 The dirstate also provides the following views onto the state:
1201 1201
1202 1202 - `nonnormalset` is a set of the filenames that have state other
1203 1203 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1204 1204
1205 1205 - `otherparentset` is a set of the filenames that are marked as coming
1206 1206 from the second parent when the dirstate is currently being merged.
1207 1207
1208 1208 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1209 1209 form that they appear as in the dirstate.
1210 1210
1211 1211 - `dirfoldmap` is a dict mapping normalized directory names to the
1212 1212 denormalized form that they appear as in the dirstate.
1213 1213 """
1214 1214
1215 1215 def __init__(self, ui, opener, root):
1216 1216 self._ui = ui
1217 1217 self._opener = opener
1218 1218 self._root = root
1219 1219 self._filename = 'dirstate'
1220 1220
1221 1221 self._parents = None
1222 1222 self._dirtyparents = False
1223 1223
1224 1224 # for consistent view between _pl() and _read() invocations
1225 1225 self._pendingmode = None
1226 1226
1227 1227 @propertycache
1228 1228 def _map(self):
1229 1229 self._map = {}
1230 1230 self.read()
1231 1231 return self._map
1232 1232
1233 1233 @propertycache
1234 1234 def copymap(self):
1235 1235 self.copymap = {}
1236 1236 self._map
1237 1237 return self.copymap
1238 1238
1239 1239 def clear(self):
1240 1240 self._map.clear()
1241 1241 self.copymap.clear()
1242 1242 self.setparents(nullid, nullid)
1243 1243 util.clearcachedproperty(self, "_dirs")
1244 1244 util.clearcachedproperty(self, "_alldirs")
1245 1245 util.clearcachedproperty(self, "filefoldmap")
1246 1246 util.clearcachedproperty(self, "dirfoldmap")
1247 1247 util.clearcachedproperty(self, "nonnormalset")
1248 1248 util.clearcachedproperty(self, "otherparentset")
1249 1249
1250 1250 def items(self):
1251 1251 return self._map.iteritems()
1252 1252
1253 1253 # forward for python2,3 compat
1254 1254 iteritems = items
1255 1255
1256 1256 def __len__(self):
1257 1257 return len(self._map)
1258 1258
1259 1259 def __iter__(self):
1260 1260 return iter(self._map)
1261 1261
1262 1262 def get(self, key, default=None):
1263 1263 return self._map.get(key, default)
1264 1264
1265 1265 def __contains__(self, key):
1266 1266 return key in self._map
1267 1267
1268 1268 def __getitem__(self, key):
1269 1269 return self._map[key]
1270 1270
1271 1271 def keys(self):
1272 1272 return self._map.keys()
1273 1273
1274 1274 def preload(self):
1275 1275 """Loads the underlying data, if it's not already loaded"""
1276 1276 self._map
1277 1277
1278 1278 def addfile(self, f, oldstate, state, mode, size, mtime):
1279 1279 """Add a tracked file to the dirstate."""
1280 1280 if oldstate in "?r" and r"_dirs" in self.__dict__:
1281 1281 self._dirs.addpath(f)
1282 1282 if oldstate == "?" and r"_alldirs" in self.__dict__:
1283 1283 self._alldirs.addpath(f)
1284 1284 self._map[f] = dirstatetuple(state, mode, size, mtime)
1285 1285 if state != 'n' or mtime == -1:
1286 1286 self.nonnormalset.add(f)
1287 1287 if size == -2:
1288 1288 self.otherparentset.add(f)
1289 1289
1290 1290 def removefile(self, f, oldstate, size):
1291 1291 """
1292 1292 Mark a file as removed in the dirstate.
1293 1293
1294 1294 The `size` parameter is used to store sentinel values that indicate
1295 1295 the file's previous state. In the future, we should refactor this
1296 1296 to be more explicit about what that state is.
1297 1297 """
1298 1298 if oldstate not in "?r" and r"_dirs" in self.__dict__:
1299 1299 self._dirs.delpath(f)
1300 1300 if oldstate == "?" and r"_alldirs" in self.__dict__:
1301 1301 self._alldirs.addpath(f)
1302 1302 if r"filefoldmap" in self.__dict__:
1303 1303 normed = util.normcase(f)
1304 1304 self.filefoldmap.pop(normed, None)
1305 1305 self._map[f] = dirstatetuple('r', 0, size, 0)
1306 1306 self.nonnormalset.add(f)
1307 1307
1308 1308 def dropfile(self, f, oldstate):
1309 1309 """
1310 1310 Remove a file from the dirstate. Returns True if the file was
1311 1311 previously recorded.
1312 1312 """
1313 1313 exists = self._map.pop(f, None) is not None
1314 1314 if exists:
1315 1315 if oldstate != "r" and r"_dirs" in self.__dict__:
1316 1316 self._dirs.delpath(f)
1317 1317 if r"_alldirs" in self.__dict__:
1318 1318 self._alldirs.delpath(f)
1319 1319 if r"filefoldmap" in self.__dict__:
1320 1320 normed = util.normcase(f)
1321 1321 self.filefoldmap.pop(normed, None)
1322 1322 self.nonnormalset.discard(f)
1323 1323 return exists
1324 1324
1325 1325 def clearambiguoustimes(self, files, now):
1326 1326 for f in files:
1327 1327 e = self.get(f)
1328 1328 if e is not None and e[0] == 'n' and e[3] == now:
1329 1329 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1330 1330 self.nonnormalset.add(f)
1331 1331
1332 1332 def nonnormalentries(self):
1333 1333 '''Compute the nonnormal dirstate entries from the dmap'''
1334 1334 try:
1335 1335 return parsers.nonnormalotherparententries(self._map)
1336 1336 except AttributeError:
1337 1337 nonnorm = set()
1338 1338 otherparent = set()
1339 1339 for fname, e in self._map.iteritems():
1340 1340 if e[0] != 'n' or e[3] == -1:
1341 1341 nonnorm.add(fname)
1342 1342 if e[0] == 'n' and e[2] == -2:
1343 1343 otherparent.add(fname)
1344 1344 return nonnorm, otherparent
1345 1345
1346 1346 @propertycache
1347 1347 def filefoldmap(self):
1348 1348 """Returns a dictionary mapping normalized case paths to their
1349 1349 non-normalized versions.
1350 1350 """
1351 1351 try:
1352 1352 makefilefoldmap = parsers.make_file_foldmap
1353 1353 except AttributeError:
1354 1354 pass
1355 1355 else:
1356 1356 return makefilefoldmap(self._map, util.normcasespec,
1357 1357 util.normcasefallback)
1358 1358
1359 1359 f = {}
1360 1360 normcase = util.normcase
1361 1361 for name, s in self._map.iteritems():
1362 1362 if s[0] != 'r':
1363 1363 f[normcase(name)] = name
1364 1364 f['.'] = '.' # prevents useless util.fspath() invocation
1365 1365 return f
1366 1366
1367 1367 def hastrackeddir(self, d):
1368 1368 """
1369 1369 Returns True if the dirstate contains a tracked (not removed) file
1370 1370 in this directory.
1371 1371 """
1372 1372 return d in self._dirs
1373 1373
1374 1374 def hasdir(self, d):
1375 1375 """
1376 1376 Returns True if the dirstate contains a file (tracked or removed)
1377 1377 in this directory.
1378 1378 """
1379 1379 return d in self._alldirs
1380 1380
1381 1381 @propertycache
1382 1382 def _dirs(self):
1383 1383 return util.dirs(self._map, 'r')
1384 1384
1385 1385 @propertycache
1386 1386 def _alldirs(self):
1387 1387 return util.dirs(self._map)
1388 1388
1389 1389 def _opendirstatefile(self):
1390 1390 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1391 1391 if self._pendingmode is not None and self._pendingmode != mode:
1392 1392 fp.close()
1393 1393 raise error.Abort(_('working directory state may be '
1394 1394 'changed parallelly'))
1395 1395 self._pendingmode = mode
1396 1396 return fp
1397 1397
1398 1398 def parents(self):
1399 1399 if not self._parents:
1400 1400 try:
1401 1401 fp = self._opendirstatefile()
1402 1402 st = fp.read(40)
1403 1403 fp.close()
1404 1404 except IOError as err:
1405 1405 if err.errno != errno.ENOENT:
1406 1406 raise
1407 1407 # File doesn't exist, so the current state is empty
1408 1408 st = ''
1409 1409
1410 1410 l = len(st)
1411 1411 if l == 40:
1412 1412 self._parents = (st[:20], st[20:40])
1413 1413 elif l == 0:
1414 1414 self._parents = (nullid, nullid)
1415 1415 else:
1416 1416 raise error.Abort(_('working directory state appears '
1417 1417 'damaged!'))
1418 1418
1419 1419 return self._parents
1420 1420
1421 1421 def setparents(self, p1, p2):
1422 1422 self._parents = (p1, p2)
1423 1423 self._dirtyparents = True
1424 1424
1425 1425 def read(self):
1426 1426 # ignore HG_PENDING because identity is used only for writing
1427 1427 self.identity = util.filestat.frompath(
1428 1428 self._opener.join(self._filename))
1429 1429
1430 1430 try:
1431 1431 fp = self._opendirstatefile()
1432 1432 try:
1433 1433 st = fp.read()
1434 1434 finally:
1435 1435 fp.close()
1436 1436 except IOError as err:
1437 1437 if err.errno != errno.ENOENT:
1438 1438 raise
1439 1439 return
1440 1440 if not st:
1441 1441 return
1442 1442
1443 1443 if util.safehasattr(parsers, 'dict_new_presized'):
1444 1444 # Make an estimate of the number of files in the dirstate based on
1445 1445 # its size. From a linear regression on a set of real-world repos,
1446 1446 # all over 10,000 files, the size of a dirstate entry is 85
1447 1447 # bytes. The cost of resizing is significantly higher than the cost
1448 1448 # of filling in a larger presized dict, so subtract 20% from the
1449 1449 # size.
1450 1450 #
1451 1451 # This heuristic is imperfect in many ways, so in a future dirstate
1452 1452 # format update it makes sense to just record the number of entries
1453 1453 # on write.
1454 1454 self._map = parsers.dict_new_presized(len(st) // 71)
1455 1455
1456 1456 # Python's garbage collector triggers a GC each time a certain number
1457 1457 # of container objects (the number being defined by
1458 1458 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1459 1459 # for each file in the dirstate. The C version then immediately marks
1460 1460 # them as not to be tracked by the collector. However, this has no
1461 1461 # effect on when GCs are triggered, only on what objects the GC looks
1462 1462 # into. This means that O(number of files) GCs are unavoidable.
1463 1463 # Depending on when in the process's lifetime the dirstate is parsed,
1464 1464 # this can get very expensive. As a workaround, disable GC while
1465 1465 # parsing the dirstate.
1466 1466 #
1467 1467 # (we cannot decorate the function directly since it is in a C module)
1468 1468 parse_dirstate = util.nogc(parsers.parse_dirstate)
1469 1469 p = parse_dirstate(self._map, self.copymap, st)
1470 1470 if not self._dirtyparents:
1471 1471 self.setparents(*p)
1472 1472
1473 1473 # Avoid excess attribute lookups by fast pathing certain checks
1474 1474 self.__contains__ = self._map.__contains__
1475 1475 self.__getitem__ = self._map.__getitem__
1476 1476 self.get = self._map.get
1477 1477
1478 1478 def write(self, st, now):
1479 1479 st.write(parsers.pack_dirstate(self._map, self.copymap,
1480 1480 self.parents(), now))
1481 1481 st.close()
1482 1482 self._dirtyparents = False
1483 1483 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1484 1484
1485 1485 @propertycache
1486 1486 def nonnormalset(self):
1487 1487 nonnorm, otherparents = self.nonnormalentries()
1488 1488 self.otherparentset = otherparents
1489 1489 return nonnorm
1490 1490
1491 1491 @propertycache
1492 1492 def otherparentset(self):
1493 1493 nonnorm, otherparents = self.nonnormalentries()
1494 1494 self.nonnormalset = nonnorm
1495 1495 return otherparents
1496 1496
1497 1497 @propertycache
1498 1498 def identity(self):
1499 1499 self._map
1500 1500 return self.identity
1501 1501
1502 1502 @propertycache
1503 1503 def dirfoldmap(self):
1504 1504 f = {}
1505 1505 normcase = util.normcase
1506 1506 for name in self._dirs:
1507 1507 f[normcase(name)] = name
1508 1508 return f