dirstate: add explicit methods for modifying dirstate...
Mark Thomas
r35078:853b7c41 default
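
This changeset replaces direct dict-style mutation of the dirstate map (dmap[f] = dirstatetuple(...), del dmap[f]) with explicit addfile, removefile and dropfile methods on dirstatemap, as shown in the hunks below. The following standalone sketch is editorial illustration only, not part of the patch: minidirstatemap is an invented stand-in that mirrors the three new methods so the calling convention can be tried in isolation.

import collections

dirstatetuple = collections.namedtuple('dirstatetuple',
                                       'state mode size mtime')

class minidirstatemap(object):
    """Toy stand-in mirroring the three methods introduced by this patch."""
    def __init__(self):
        self._map = {}

    def addfile(self, f, state, mode, size, mtime):
        """Add or update a tracked file entry."""
        self._map[f] = dirstatetuple(state, mode, size, mtime)

    def removefile(self, f, size):
        """Mark a file removed; size carries the previous-state sentinel."""
        self._map[f] = dirstatetuple('r', 0, size, 0)

    def dropfile(self, f):
        """Forget the file entirely; return True if it was tracked."""
        return self._map.pop(f, None) is not None

dmap = minidirstatemap()
dmap.addfile('a.txt', 'n', 0o644, 12, 0)  # was: dmap[f] = dirstatetuple(...)
dmap.removefile('a.txt', 0)               # was: dmap[f] = dirstatetuple('r', 0, size, 0)
assert dmap.dropfile('a.txt')             # was: if f in dmap: del dmap[f]
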
@@ -1,1464 +1,1477 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
71 71 # UNC path pointing to root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._lastnormaltime = 0
75 75 self._ui = ui
76 76 self._filecache = {}
77 77 self._parentwriters = 0
78 78 self._filename = 'dirstate'
79 79 self._pendingfilename = '%s.pending' % self._filename
80 80 self._plchangecallbacks = {}
81 81 self._origpl = None
82 82 self._updatedfiles = set()
83 83
84 84 @contextlib.contextmanager
85 85 def parentchange(self):
86 86 '''Context manager for handling dirstate parents.
87 87
88 88 If an exception occurs in the scope of the context manager,
89 89 the incoherent dirstate won't be written when wlock is
90 90 released.
91 91 '''
92 92 self._parentwriters += 1
93 93 yield
94 94 # Typically we want the "undo" step of a context manager in a
95 95 # finally block so it happens even when an exception
96 96 # occurs. In this case, however, we only want to decrement
97 97 # parentwriters if the code in the with statement exits
98 98 # normally, so we don't have a try/finally here on purpose.
99 99 self._parentwriters -= 1
100 100
101 101 def beginparentchange(self):
102 102 '''Marks the beginning of a set of changes that involve changing
103 103 the dirstate parents. If there is an exception during this time,
104 104 the dirstate will not be written when the wlock is released. This
105 105 prevents writing an incoherent dirstate where the parent doesn't
106 106 match the contents.
107 107 '''
108 108 self._ui.deprecwarn('beginparentchange is obsoleted by the '
109 109 'parentchange context manager.', '4.3')
110 110 self._parentwriters += 1
111 111
112 112 def endparentchange(self):
113 113 '''Marks the end of a set of changes that involve changing the
114 114 dirstate parents. Once all parent changes have been marked done,
115 115 the wlock will be free to write the dirstate on release.
116 116 '''
117 117 self._ui.deprecwarn('endparentchange is obsoleted by the '
118 118 'parentchange context manager.', '4.3')
119 119 if self._parentwriters > 0:
120 120 self._parentwriters -= 1
121 121
122 122 def pendingparentchange(self):
123 123 '''Returns true if the dirstate is in the middle of a set of changes
124 124 that modify the dirstate parent.
125 125 '''
126 126 return self._parentwriters > 0
127 127
128 128 @propertycache
129 129 def _map(self):
130 130 """Return the dirstate contents (see documentation for dirstatemap)."""
131 131 self._map = dirstatemap(self._ui, self._opener, self._root)
132 132 return self._map
133 133
134 134 @property
135 135 def _sparsematcher(self):
136 136 """The matcher for the sparse checkout.
137 137
138 138 The working directory may not include every file from a manifest. The
139 139 matcher obtained by this property will match a path if it is to be
140 140 included in the working directory.
141 141 """
142 142 # TODO there is potential to cache this property. For now, the matcher
143 143 # is resolved on every access. (But the called function does use a
144 144 # cache to keep the lookup fast.)
145 145 return self._sparsematchfn()
146 146
147 147 @repocache('branch')
148 148 def _branch(self):
149 149 try:
150 150 return self._opener.read("branch").strip() or "default"
151 151 except IOError as inst:
152 152 if inst.errno != errno.ENOENT:
153 153 raise
154 154 return "default"
155 155
156 156 @property
157 157 def _pl(self):
158 158 return self._map.parents()
159 159
160 160 def dirs(self):
161 161 return self._map.dirs
162 162
163 163 @rootcache('.hgignore')
164 164 def _ignore(self):
165 165 files = self._ignorefiles()
166 166 if not files:
167 167 return matchmod.never(self._root, '')
168 168
169 169 pats = ['include:%s' % f for f in files]
170 170 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
171 171
172 172 @propertycache
173 173 def _slash(self):
174 174 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
175 175
176 176 @propertycache
177 177 def _checklink(self):
178 178 return util.checklink(self._root)
179 179
180 180 @propertycache
181 181 def _checkexec(self):
182 182 return util.checkexec(self._root)
183 183
184 184 @propertycache
185 185 def _checkcase(self):
186 186 return not util.fscasesensitive(self._join('.hg'))
187 187
188 188 def _join(self, f):
189 189 # much faster than os.path.join()
190 190 # it's safe because f is always a relative path
191 191 return self._rootdir + f
192 192
193 193 def flagfunc(self, buildfallback):
194 194 if self._checklink and self._checkexec:
195 195 def f(x):
196 196 try:
197 197 st = os.lstat(self._join(x))
198 198 if util.statislink(st):
199 199 return 'l'
200 200 if util.statisexec(st):
201 201 return 'x'
202 202 except OSError:
203 203 pass
204 204 return ''
205 205 return f
206 206
207 207 fallback = buildfallback()
208 208 if self._checklink:
209 209 def f(x):
210 210 if os.path.islink(self._join(x)):
211 211 return 'l'
212 212 if 'x' in fallback(x):
213 213 return 'x'
214 214 return ''
215 215 return f
216 216 if self._checkexec:
217 217 def f(x):
218 218 if 'l' in fallback(x):
219 219 return 'l'
220 220 if util.isexec(self._join(x)):
221 221 return 'x'
222 222 return ''
223 223 return f
224 224 else:
225 225 return fallback
226 226
227 227 @propertycache
228 228 def _cwd(self):
229 229 # internal config: ui.forcecwd
230 230 forcecwd = self._ui.config('ui', 'forcecwd')
231 231 if forcecwd:
232 232 return forcecwd
233 233 return pycompat.getcwd()
234 234
235 235 def getcwd(self):
236 236 '''Return the path from which a canonical path is calculated.
237 237
238 238 This path should be used to resolve file patterns or to convert
239 239 canonical paths back to file paths for display. It shouldn't be
240 240 used to get real file paths. Use vfs functions instead.
241 241 '''
242 242 cwd = self._cwd
243 243 if cwd == self._root:
244 244 return ''
245 245 # self._root ends with a path separator if self._root is '/' or 'C:\'
246 246 rootsep = self._root
247 247 if not util.endswithsep(rootsep):
248 248 rootsep += pycompat.ossep
249 249 if cwd.startswith(rootsep):
250 250 return cwd[len(rootsep):]
251 251 else:
252 252 # we're outside the repo. return an absolute path.
253 253 return cwd
254 254
255 255 def pathto(self, f, cwd=None):
256 256 if cwd is None:
257 257 cwd = self.getcwd()
258 258 path = util.pathto(self._root, cwd, f)
259 259 if self._slash:
260 260 return util.pconvert(path)
261 261 return path
262 262
263 263 def __getitem__(self, key):
264 264 '''Return the current state of key (a filename) in the dirstate.
265 265
266 266 States are:
267 267 n normal
268 268 m needs merging
269 269 r marked for removal
270 270 a marked for addition
271 271 ? not tracked
272 272 '''
273 273 return self._map.get(key, ("?",))[0]
274 274
275 275 def __contains__(self, key):
276 276 return key in self._map
277 277
278 278 def __iter__(self):
279 279 return iter(sorted(self._map))
280 280
281 281 def items(self):
282 282 return self._map.iteritems()
283 283
284 284 iteritems = items
285 285
286 286 def parents(self):
287 287 return [self._validate(p) for p in self._pl]
288 288
289 289 def p1(self):
290 290 return self._validate(self._pl[0])
291 291
292 292 def p2(self):
293 293 return self._validate(self._pl[1])
294 294
295 295 def branch(self):
296 296 return encoding.tolocal(self._branch)
297 297
298 298 def setparents(self, p1, p2=nullid):
299 299 """Set dirstate parents to p1 and p2.
300 300
301 301 When moving from two parents to one, 'm' merged entries are
302 302 adjusted to normal and previous copy records are discarded and
303 303 returned by the call.
304 304
305 305 See localrepo.setparents()
306 306 """
307 307 if self._parentwriters == 0:
308 308 raise ValueError("cannot set dirstate parent without "
309 309 "calling dirstate.beginparentchange")
310 310
311 311 self._dirty = True
312 312 oldp2 = self._pl[1]
313 313 if self._origpl is None:
314 314 self._origpl = self._pl
315 315 self._map.setparents(p1, p2)
316 316 copies = {}
317 317 if oldp2 != nullid and p2 == nullid:
318 318 candidatefiles = self._map.nonnormalset.union(
319 319 self._map.otherparentset)
320 320 for f in candidatefiles:
321 321 s = self._map.get(f)
322 322 if s is None:
323 323 continue
324 324
325 325 # Discard 'm' markers when moving away from a merge state
326 326 if s[0] == 'm':
327 327 source = self._map.copymap.get(f)
328 328 if source:
329 329 copies[f] = source
330 330 self.normallookup(f)
331 331 # Also fix up otherparent markers
332 332 elif s[0] == 'n' and s[2] == -2:
333 333 source = self._map.copymap.get(f)
334 334 if source:
335 335 copies[f] = source
336 336 self.add(f)
337 337 return copies
338 338
339 339 def setbranch(self, branch):
340 340 self._branch = encoding.fromlocal(branch)
341 341 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
342 342 try:
343 343 f.write(self._branch + '\n')
344 344 f.close()
345 345
346 346 # make sure filecache has the correct stat info for _branch after
347 347 # replacing the underlying file
348 348 ce = self._filecache['_branch']
349 349 if ce:
350 350 ce.refresh()
351 351 except: # re-raises
352 352 f.discard()
353 353 raise
354 354
355 355 def invalidate(self):
356 356 '''Causes the next access to reread the dirstate.
357 357
358 358 This is different from localrepo.invalidatedirstate() because it always
359 359 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
360 360 check whether the dirstate has changed before rereading it.'''
361 361
362 362 for a in ("_map", "_branch", "_ignore"):
363 363 if a in self.__dict__:
364 364 delattr(self, a)
365 365 self._lastnormaltime = 0
366 366 self._dirty = False
367 367 self._updatedfiles.clear()
368 368 self._parentwriters = 0
369 369 self._origpl = None
370 370
371 371 def copy(self, source, dest):
372 372 """Mark dest as a copy of source. Unmark dest if source is None."""
373 373 if source == dest:
374 374 return
375 375 self._dirty = True
376 376 if source is not None:
377 377 self._map.copymap[dest] = source
378 378 self._updatedfiles.add(source)
379 379 self._updatedfiles.add(dest)
380 380 elif self._map.copymap.pop(dest, None):
381 381 self._updatedfiles.add(dest)
382 382
383 383 def copied(self, file):
384 384 return self._map.copymap.get(file, None)
385 385
386 386 def copies(self):
387 387 return self._map.copymap
388 388
389 389 def _droppath(self, f):
390 390 if self[f] not in "?r" and "dirs" in self._map.__dict__:
391 391 self._map.dirs.delpath(f)
392 392
393 393 if "filefoldmap" in self._map.__dict__:
394 394 normed = util.normcase(f)
395 395 if normed in self._map.filefoldmap:
396 396 del self._map.filefoldmap[normed]
397 397
398 398 self._updatedfiles.add(f)
399 399
400 400 def _addpath(self, f, state, mode, size, mtime):
401 401 oldstate = self[f]
402 402 if state == 'a' or oldstate == 'r':
403 403 scmutil.checkfilename(f)
404 404 if f in self._map.dirs:
405 405 raise error.Abort(_('directory %r already in dirstate') % f)
406 406 # shadows
407 407 for d in util.finddirs(f):
408 408 if d in self._map.dirs:
409 409 break
410 410 entry = self._map.get(d)
411 411 if entry is not None and entry[0] != 'r':
412 412 raise error.Abort(
413 413 _('file %r in dirstate clashes with %r') % (d, f))
414 414 if oldstate in "?r" and "dirs" in self._map.__dict__:
415 415 self._map.dirs.addpath(f)
416 416 self._dirty = True
417 417 self._updatedfiles.add(f)
418 self._map[f] = dirstatetuple(state, mode, size, mtime)
419 418 if state != 'n' or mtime == -1:
420 419 self._map.nonnormalset.add(f)
421 420 if size == -2:
422 421 self._map.otherparentset.add(f)
422 self._map.addfile(f, state, mode, size, mtime)
423 423
424 424 def normal(self, f):
425 425 '''Mark a file normal and clean.'''
426 426 s = os.lstat(self._join(f))
427 427 mtime = s.st_mtime
428 428 self._addpath(f, 'n', s.st_mode,
429 429 s.st_size & _rangemask, mtime & _rangemask)
430 430 self._map.copymap.pop(f, None)
431 431 if f in self._map.nonnormalset:
432 432 self._map.nonnormalset.remove(f)
433 433 if mtime > self._lastnormaltime:
434 434 # Remember the most recent modification timeslot for status(),
435 435 # to make sure we won't miss future size-preserving file content
436 436 # modifications that happen within the same timeslot.
437 437 self._lastnormaltime = mtime
438 438
439 439 def normallookup(self, f):
440 440 '''Mark a file normal, but possibly dirty.'''
441 441 if self._pl[1] != nullid:
442 442 # if there is a merge going on and the file was either
443 443 # in state 'm' (-1) or coming from other parent (-2) before
444 444 # being removed, restore that state.
445 445 entry = self._map.get(f)
446 446 if entry is not None:
447 447 if entry[0] == 'r' and entry[2] in (-1, -2):
448 448 source = self._map.copymap.get(f)
449 449 if entry[2] == -1:
450 450 self.merge(f)
451 451 elif entry[2] == -2:
452 452 self.otherparent(f)
453 453 if source:
454 454 self.copy(source, f)
455 455 return
456 456 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
457 457 return
458 458 self._addpath(f, 'n', 0, -1, -1)
459 459 self._map.copymap.pop(f, None)
460 460
461 461 def otherparent(self, f):
462 462 '''Mark as coming from the other parent, always dirty.'''
463 463 if self._pl[1] == nullid:
464 464 raise error.Abort(_("setting %r to other parent "
465 465 "only allowed in merges") % f)
466 466 if f in self and self[f] == 'n':
467 467 # merge-like
468 468 self._addpath(f, 'm', 0, -2, -1)
469 469 else:
470 470 # add-like
471 471 self._addpath(f, 'n', 0, -2, -1)
472 472 self._map.copymap.pop(f, None)
473 473
474 474 def add(self, f):
475 475 '''Mark a file added.'''
476 476 self._addpath(f, 'a', 0, -1, -1)
477 477 self._map.copymap.pop(f, None)
478 478
479 479 def remove(self, f):
480 480 '''Mark a file removed.'''
481 481 self._dirty = True
482 482 self._droppath(f)
483 483 size = 0
484 484 if self._pl[1] != nullid:
485 485 entry = self._map.get(f)
486 486 if entry is not None:
487 487 # backup the previous state
488 488 if entry[0] == 'm': # merge
489 489 size = -1
490 490 elif entry[0] == 'n' and entry[2] == -2: # other parent
491 491 size = -2
492 492 self._map.otherparentset.add(f)
493 self._map[f] = dirstatetuple('r', 0, size, 0)
494 493 self._map.nonnormalset.add(f)
494 self._map.removefile(f, size)
495 495 if size == 0:
496 496 self._map.copymap.pop(f, None)
497 497
498 498 def merge(self, f):
499 499 '''Mark a file merged.'''
500 500 if self._pl[1] == nullid:
501 501 return self.normallookup(f)
502 502 return self.otherparent(f)
503 503
504 504 def drop(self, f):
505 505 '''Drop a file from the dirstate'''
506 if f in self._map:
506 if self._map.dropfile(f):
507 507 self._dirty = True
508 508 self._droppath(f)
509 del self._map[f]
510 509 if f in self._map.nonnormalset:
511 510 self._map.nonnormalset.remove(f)
512 511 self._map.copymap.pop(f, None)
513 512
514 513 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
515 514 if exists is None:
516 515 exists = os.path.lexists(os.path.join(self._root, path))
517 516 if not exists:
518 517 # Maybe a path component exists
519 518 if not ignoremissing and '/' in path:
520 519 d, f = path.rsplit('/', 1)
521 520 d = self._normalize(d, False, ignoremissing, None)
522 521 folded = d + "/" + f
523 522 else:
524 523 # No path components, preserve original case
525 524 folded = path
526 525 else:
527 526 # recursively normalize leading directory components
528 527 # against dirstate
529 528 if '/' in normed:
530 529 d, f = normed.rsplit('/', 1)
531 530 d = self._normalize(d, False, ignoremissing, True)
532 531 r = self._root + "/" + d
533 532 folded = d + "/" + util.fspath(f, r)
534 533 else:
535 534 folded = util.fspath(normed, self._root)
536 535 storemap[normed] = folded
537 536
538 537 return folded
539 538
540 539 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
541 540 normed = util.normcase(path)
542 541 folded = self._map.filefoldmap.get(normed, None)
543 542 if folded is None:
544 543 if isknown:
545 544 folded = path
546 545 else:
547 546 folded = self._discoverpath(path, normed, ignoremissing, exists,
548 547 self._map.filefoldmap)
549 548 return folded
550 549
551 550 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
552 551 normed = util.normcase(path)
553 552 folded = self._map.filefoldmap.get(normed, None)
554 553 if folded is None:
555 554 folded = self._map.dirfoldmap.get(normed, None)
556 555 if folded is None:
557 556 if isknown:
558 557 folded = path
559 558 else:
560 559 # store discovered result in dirfoldmap so that future
561 560 # normalizefile calls don't start matching directories
562 561 folded = self._discoverpath(path, normed, ignoremissing, exists,
563 562 self._map.dirfoldmap)
564 563 return folded
565 564
566 565 def normalize(self, path, isknown=False, ignoremissing=False):
567 566 '''
568 567 normalize the case of a pathname when on a casefolding filesystem
569 568
570 569 isknown specifies whether the filename came from walking the
571 570 disk, to avoid extra filesystem access.
572 571
573 572 If ignoremissing is True, missing paths are returned
574 573 unchanged. Otherwise, we try harder to normalize possibly
575 574 existing path components.
576 575
577 576 The normalized case is determined based on the following precedence:
578 577
579 578 - version of name already stored in the dirstate
580 579 - version of name stored on disk
581 580 - version provided via command arguments
582 581 '''
583 582
584 583 if self._checkcase:
585 584 return self._normalize(path, isknown, ignoremissing)
586 585 return path
587 586
588 587 def clear(self):
589 588 self._map.clear()
590 589 self._lastnormaltime = 0
591 590 self._updatedfiles.clear()
592 591 self._dirty = True
593 592
594 593 def rebuild(self, parent, allfiles, changedfiles=None):
595 594 if changedfiles is None:
596 595 # Rebuild entire dirstate
597 596 changedfiles = allfiles
598 597 lastnormaltime = self._lastnormaltime
599 598 self.clear()
600 599 self._lastnormaltime = lastnormaltime
601 600
602 601 if self._origpl is None:
603 602 self._origpl = self._pl
604 603 self._map.setparents(parent, nullid)
605 604 for f in changedfiles:
606 605 if f in allfiles:
607 606 self.normallookup(f)
608 607 else:
609 608 self.drop(f)
610 609
611 610 self._dirty = True
612 611
613 612 def identity(self):
614 613 '''Return identity of dirstate itself to detect changes in storage
615 614
616 615 If identity of previous dirstate is equal to this, writing
617 616 changes based on the former dirstate out can keep consistency.
618 617 '''
619 618 return self._map.identity
620 619
621 620 def write(self, tr):
622 621 if not self._dirty:
623 622 return
624 623
625 624 filename = self._filename
626 625 if tr:
627 626 # 'dirstate.write()' is not only for writing in-memory
628 627 # changes out, but also for dropping ambiguous timestamp.
629 628 # delayed writing re-raise "ambiguous timestamp issue".
630 629 # See also the wiki page below for detail:
631 630 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
632 631
633 632 # emulate dropping timestamp in 'parsers.pack_dirstate'
634 633 now = _getfsnow(self._opener)
635 634 dmap = self._map
636 635 for f in self._updatedfiles:
637 636 e = dmap.get(f)
638 637 if e is not None and e[0] == 'n' and e[3] == now:
639 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
638 dmap.addfile(f, e[0], e[1], e[2], -1)
640 639 self._map.nonnormalset.add(f)
641 640
642 641 # emulate that all 'dirstate.normal' results are written out
643 642 self._lastnormaltime = 0
644 643 self._updatedfiles.clear()
645 644
646 645 # delay writing in-memory changes out
647 646 tr.addfilegenerator('dirstate', (self._filename,),
648 647 self._writedirstate, location='plain')
649 648 return
650 649
651 650 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
652 651 self._writedirstate(st)
653 652
654 653 def addparentchangecallback(self, category, callback):
655 654 """add a callback to be called when the wd parents are changed
656 655
657 656 Callback will be called with the following arguments:
658 657 dirstate, (oldp1, oldp2), (newp1, newp2)
659 658
660 659 Category is a unique identifier to allow overwriting an old callback
661 660 with a newer callback.
662 661 """
663 662 self._plchangecallbacks[category] = callback
664 663
665 664 def _writedirstate(self, st):
666 665 # notify callbacks about parents change
667 666 if self._origpl is not None and self._origpl != self._pl:
668 667 for c, callback in sorted(self._plchangecallbacks.iteritems()):
669 668 callback(self, self._origpl, self._pl)
670 669 self._origpl = None
671 670 # use the modification time of the newly created temporary file as the
672 671 # filesystem's notion of 'now'
673 672 now = util.fstat(st).st_mtime & _rangemask
674 673
675 674 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
676 675 # the timestamp of each entry in the dirstate, because of 'now > mtime'
677 676 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
678 677 if delaywrite > 0:
679 678 # do we have any files to delay for?
680 679 for f, e in self._map.iteritems():
681 680 if e[0] == 'n' and e[3] == now:
682 681 import time # to avoid useless import
683 682 # rather than sleep n seconds, sleep until the next
684 683 # multiple of n seconds
685 684 clock = time.time()
686 685 start = int(clock) - (int(clock) % delaywrite)
687 686 end = start + delaywrite
688 687 time.sleep(end - clock)
689 688 now = end # trust our estimate that the end is near now
690 689 break
691 690
692 691 self._map.write(st, now)
693 692 self._lastnormaltime = 0
694 693 self._dirty = False
695 694
696 695 def _dirignore(self, f):
697 696 if f == '.':
698 697 return False
699 698 if self._ignore(f):
700 699 return True
701 700 for p in util.finddirs(f):
702 701 if self._ignore(p):
703 702 return True
704 703 return False
705 704
706 705 def _ignorefiles(self):
707 706 files = []
708 707 if os.path.exists(self._join('.hgignore')):
709 708 files.append(self._join('.hgignore'))
710 709 for name, path in self._ui.configitems("ui"):
711 710 if name == 'ignore' or name.startswith('ignore.'):
712 711 # we need to use os.path.join here rather than self._join
713 712 # because path is arbitrary and user-specified
714 713 files.append(os.path.join(self._rootdir, util.expandpath(path)))
715 714 return files
716 715
717 716 def _ignorefileandline(self, f):
718 717 files = collections.deque(self._ignorefiles())
719 718 visited = set()
720 719 while files:
721 720 i = files.popleft()
722 721 patterns = matchmod.readpatternfile(i, self._ui.warn,
723 722 sourceinfo=True)
724 723 for pattern, lineno, line in patterns:
725 724 kind, p = matchmod._patsplit(pattern, 'glob')
726 725 if kind == "subinclude":
727 726 if p not in visited:
728 727 files.append(p)
729 728 continue
730 729 m = matchmod.match(self._root, '', [], [pattern],
731 730 warn=self._ui.warn)
732 731 if m(f):
733 732 return (i, lineno, line)
734 733 visited.add(i)
735 734 return (None, -1, "")
736 735
737 736 def _walkexplicit(self, match, subrepos):
738 737 '''Get stat data about the files explicitly specified by match.
739 738
740 739 Return a triple (results, dirsfound, dirsnotfound).
741 740 - results is a mapping from filename to stat result. It also contains
742 741 listings mapping subrepos and .hg to None.
743 742 - dirsfound is a list of files found to be directories.
744 743 - dirsnotfound is a list of files that the dirstate thinks are
745 744 directories and that were not found.'''
746 745
747 746 def badtype(mode):
748 747 kind = _('unknown')
749 748 if stat.S_ISCHR(mode):
750 749 kind = _('character device')
751 750 elif stat.S_ISBLK(mode):
752 751 kind = _('block device')
753 752 elif stat.S_ISFIFO(mode):
754 753 kind = _('fifo')
755 754 elif stat.S_ISSOCK(mode):
756 755 kind = _('socket')
757 756 elif stat.S_ISDIR(mode):
758 757 kind = _('directory')
759 758 return _('unsupported file type (type is %s)') % kind
760 759
761 760 matchedir = match.explicitdir
762 761 badfn = match.bad
763 762 dmap = self._map
764 763 lstat = os.lstat
765 764 getkind = stat.S_IFMT
766 765 dirkind = stat.S_IFDIR
767 766 regkind = stat.S_IFREG
768 767 lnkkind = stat.S_IFLNK
769 768 join = self._join
770 769 dirsfound = []
771 770 foundadd = dirsfound.append
772 771 dirsnotfound = []
773 772 notfoundadd = dirsnotfound.append
774 773
775 774 if not match.isexact() and self._checkcase:
776 775 normalize = self._normalize
777 776 else:
778 777 normalize = None
779 778
780 779 files = sorted(match.files())
781 780 subrepos.sort()
782 781 i, j = 0, 0
783 782 while i < len(files) and j < len(subrepos):
784 783 subpath = subrepos[j] + "/"
785 784 if files[i] < subpath:
786 785 i += 1
787 786 continue
788 787 while i < len(files) and files[i].startswith(subpath):
789 788 del files[i]
790 789 j += 1
791 790
792 791 if not files or '.' in files:
793 792 files = ['.']
794 793 results = dict.fromkeys(subrepos)
795 794 results['.hg'] = None
796 795
797 796 alldirs = None
798 797 for ff in files:
799 798 # constructing the foldmap is expensive, so don't do it for the
800 799 # common case where files is ['.']
801 800 if normalize and ff != '.':
802 801 nf = normalize(ff, False, True)
803 802 else:
804 803 nf = ff
805 804 if nf in results:
806 805 continue
807 806
808 807 try:
809 808 st = lstat(join(nf))
810 809 kind = getkind(st.st_mode)
811 810 if kind == dirkind:
812 811 if nf in dmap:
813 812 # file replaced by dir on disk but still in dirstate
814 813 results[nf] = None
815 814 if matchedir:
816 815 matchedir(nf)
817 816 foundadd((nf, ff))
818 817 elif kind == regkind or kind == lnkkind:
819 818 results[nf] = st
820 819 else:
821 820 badfn(ff, badtype(kind))
822 821 if nf in dmap:
823 822 results[nf] = None
824 823 except OSError as inst: # nf not found on disk - it is dirstate only
825 824 if nf in dmap: # does it exactly match a missing file?
826 825 results[nf] = None
827 826 else: # does it match a missing directory?
828 827 if alldirs is None:
829 828 alldirs = util.dirs(dmap._map)
830 829 if nf in alldirs:
831 830 if matchedir:
832 831 matchedir(nf)
833 832 notfoundadd(nf)
834 833 else:
835 834 badfn(ff, encoding.strtolocal(inst.strerror))
836 835
837 836 # Case insensitive filesystems cannot rely on lstat() failing to detect
838 837 # a case-only rename. Prune the stat object for any file that does not
839 838 # match the case in the filesystem, if there are multiple files that
840 839 # normalize to the same path.
841 840 if match.isexact() and self._checkcase:
842 841 normed = {}
843 842
844 843 for f, st in results.iteritems():
845 844 if st is None:
846 845 continue
847 846
848 847 nc = util.normcase(f)
849 848 paths = normed.get(nc)
850 849
851 850 if paths is None:
852 851 paths = set()
853 852 normed[nc] = paths
854 853
855 854 paths.add(f)
856 855
857 856 for norm, paths in normed.iteritems():
858 857 if len(paths) > 1:
859 858 for path in paths:
860 859 folded = self._discoverpath(path, norm, True, None,
861 860 self._map.dirfoldmap)
862 861 if path != folded:
863 862 results[path] = None
864 863
865 864 return results, dirsfound, dirsnotfound
866 865
867 866 def walk(self, match, subrepos, unknown, ignored, full=True):
868 867 '''
869 868 Walk recursively through the directory tree, finding all files
870 869 matched by match.
871 870
872 871 If full is False, maybe skip some known-clean files.
873 872
874 873 Return a dict mapping filename to stat-like object (either
875 874 mercurial.osutil.stat instance or return value of os.stat()).
876 875
877 876 '''
878 877 # full is a flag that extensions that hook into walk can use -- this
879 878 # implementation doesn't use it at all. This satisfies the contract
880 879 # because we only guarantee a "maybe".
881 880
882 881 if ignored:
883 882 ignore = util.never
884 883 dirignore = util.never
885 884 elif unknown:
886 885 ignore = self._ignore
887 886 dirignore = self._dirignore
888 887 else:
889 888 # if not unknown and not ignored, drop dir recursion and step 2
890 889 ignore = util.always
891 890 dirignore = util.always
892 891
893 892 matchfn = match.matchfn
894 893 matchalways = match.always()
895 894 matchtdir = match.traversedir
896 895 dmap = self._map
897 896 listdir = util.listdir
898 897 lstat = os.lstat
899 898 dirkind = stat.S_IFDIR
900 899 regkind = stat.S_IFREG
901 900 lnkkind = stat.S_IFLNK
902 901 join = self._join
903 902
904 903 exact = skipstep3 = False
905 904 if match.isexact(): # match.exact
906 905 exact = True
907 906 dirignore = util.always # skip step 2
908 907 elif match.prefix(): # match.match, no patterns
909 908 skipstep3 = True
910 909
911 910 if not exact and self._checkcase:
912 911 normalize = self._normalize
913 912 normalizefile = self._normalizefile
914 913 skipstep3 = False
915 914 else:
916 915 normalize = self._normalize
917 916 normalizefile = None
918 917
919 918 # step 1: find all explicit files
920 919 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
921 920
922 921 skipstep3 = skipstep3 and not (work or dirsnotfound)
923 922 work = [d for d in work if not dirignore(d[0])]
924 923
925 924 # step 2: visit subdirectories
926 925 def traverse(work, alreadynormed):
927 926 wadd = work.append
928 927 while work:
929 928 nd = work.pop()
930 929 if not match.visitdir(nd):
931 930 continue
932 931 skip = None
933 932 if nd == '.':
934 933 nd = ''
935 934 else:
936 935 skip = '.hg'
937 936 try:
938 937 entries = listdir(join(nd), stat=True, skip=skip)
939 938 except OSError as inst:
940 939 if inst.errno in (errno.EACCES, errno.ENOENT):
941 940 match.bad(self.pathto(nd),
942 941 encoding.strtolocal(inst.strerror))
943 942 continue
944 943 raise
945 944 for f, kind, st in entries:
946 945 if normalizefile:
947 946 # even though f might be a directory, we're only
948 947 # interested in comparing it to files currently in the
949 948 # dmap -- therefore normalizefile is enough
950 949 nf = normalizefile(nd and (nd + "/" + f) or f, True,
951 950 True)
952 951 else:
953 952 nf = nd and (nd + "/" + f) or f
954 953 if nf not in results:
955 954 if kind == dirkind:
956 955 if not ignore(nf):
957 956 if matchtdir:
958 957 matchtdir(nf)
959 958 wadd(nf)
960 959 if nf in dmap and (matchalways or matchfn(nf)):
961 960 results[nf] = None
962 961 elif kind == regkind or kind == lnkkind:
963 962 if nf in dmap:
964 963 if matchalways or matchfn(nf):
965 964 results[nf] = st
966 965 elif ((matchalways or matchfn(nf))
967 966 and not ignore(nf)):
968 967 # unknown file -- normalize if necessary
969 968 if not alreadynormed:
970 969 nf = normalize(nf, False, True)
971 970 results[nf] = st
972 971 elif nf in dmap and (matchalways or matchfn(nf)):
973 972 results[nf] = None
974 973
975 974 for nd, d in work:
976 975 # alreadynormed means that traverse() doesn't have to do any
977 976 # expensive directory normalization
978 977 alreadynormed = not normalize or nd == d
979 978 traverse([d], alreadynormed)
980 979
981 980 for s in subrepos:
982 981 del results[s]
983 982 del results['.hg']
984 983
985 984 # step 3: visit remaining files from dmap
986 985 if not skipstep3 and not exact:
987 986 # If a dmap file is not in results yet, it was either
988 987 # a) not matching matchfn b) ignored, c) missing, or d) under a
989 988 # symlink directory.
990 989 if not results and matchalways:
991 990 visit = [f for f in dmap]
992 991 else:
993 992 visit = [f for f in dmap if f not in results and matchfn(f)]
994 993 visit.sort()
995 994
996 995 if unknown:
997 996 # unknown == True means we walked all dirs under the roots
998 997 # that weren't ignored, and everything that matched was stat'ed
999 998 # and is already in results.
1000 999 # The rest must thus be ignored or under a symlink.
1001 1000 audit_path = pathutil.pathauditor(self._root, cached=True)
1002 1001
1003 1002 for nf in iter(visit):
1004 1003 # If a stat for the same file was already added with a
1005 1004 # different case, don't add one for this, since that would
1006 1005 # make it appear as if the file exists under both names
1007 1006 # on disk.
1008 1007 if (normalizefile and
1009 1008 normalizefile(nf, True, True) in results):
1010 1009 results[nf] = None
1011 1010 # Report ignored items in the dmap as long as they are not
1012 1011 # under a symlink directory.
1013 1012 elif audit_path.check(nf):
1014 1013 try:
1015 1014 results[nf] = lstat(join(nf))
1016 1015 # file was just ignored, no links, and exists
1017 1016 except OSError:
1018 1017 # file doesn't exist
1019 1018 results[nf] = None
1020 1019 else:
1021 1020 # It's either missing or under a symlink directory
1022 1021 # which we in this case report as missing
1023 1022 results[nf] = None
1024 1023 else:
1025 1024 # We may not have walked the full directory tree above,
1026 1025 # so stat and check everything we missed.
1027 1026 iv = iter(visit)
1028 1027 for st in util.statfiles([join(i) for i in visit]):
1029 1028 results[next(iv)] = st
1030 1029 return results
1031 1030
1032 1031 def status(self, match, subrepos, ignored, clean, unknown):
1033 1032 '''Determine the status of the working copy relative to the
1034 1033 dirstate and return a pair of (unsure, status), where status is of type
1035 1034 scmutil.status and:
1036 1035
1037 1036 unsure:
1038 1037 files that might have been modified since the dirstate was
1039 1038 written, but need to be read to be sure (size is the same
1040 1039 but mtime differs)
1041 1040 status.modified:
1042 1041 files that have definitely been modified since the dirstate
1043 1042 was written (different size or mode)
1044 1043 status.clean:
1045 1044 files that have definitely not been modified since the
1046 1045 dirstate was written
1047 1046 '''
1048 1047 listignored, listclean, listunknown = ignored, clean, unknown
1049 1048 lookup, modified, added, unknown, ignored = [], [], [], [], []
1050 1049 removed, deleted, clean = [], [], []
1051 1050
1052 1051 dmap = self._map
1053 1052 dmap.preload()
1054 1053 dcontains = dmap.__contains__
1055 1054 dget = dmap.__getitem__
1056 1055 ladd = lookup.append # aka "unsure"
1057 1056 madd = modified.append
1058 1057 aadd = added.append
1059 1058 uadd = unknown.append
1060 1059 iadd = ignored.append
1061 1060 radd = removed.append
1062 1061 dadd = deleted.append
1063 1062 cadd = clean.append
1064 1063 mexact = match.exact
1065 1064 dirignore = self._dirignore
1066 1065 checkexec = self._checkexec
1067 1066 copymap = self._map.copymap
1068 1067 lastnormaltime = self._lastnormaltime
1069 1068
1070 1069 # We need to do full walks when either
1071 1070 # - we're listing all clean files, or
1072 1071 # - match.traversedir does something, because match.traversedir should
1073 1072 # be called for every dir in the working dir
1074 1073 full = listclean or match.traversedir is not None
1075 1074 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1076 1075 full=full).iteritems():
1077 1076 if not dcontains(fn):
1078 1077 if (listignored or mexact(fn)) and dirignore(fn):
1079 1078 if listignored:
1080 1079 iadd(fn)
1081 1080 else:
1082 1081 uadd(fn)
1083 1082 continue
1084 1083
1085 1084 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1086 1085 # written like that for performance reasons. dmap[fn] is not a
1087 1086 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1088 1087 # opcode has fast paths when the value to be unpacked is a tuple or
1089 1088 # a list, but falls back to creating a full-fledged iterator in
1090 1089 # general. That is much slower than simply accessing and storing the
1091 1090 # tuple members one by one.
1092 1091 t = dget(fn)
1093 1092 state = t[0]
1094 1093 mode = t[1]
1095 1094 size = t[2]
1096 1095 time = t[3]
1097 1096
1098 1097 if not st and state in "nma":
1099 1098 dadd(fn)
1100 1099 elif state == 'n':
1101 1100 if (size >= 0 and
1102 1101 ((size != st.st_size and size != st.st_size & _rangemask)
1103 1102 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1104 1103 or size == -2 # other parent
1105 1104 or fn in copymap):
1106 1105 madd(fn)
1107 1106 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1108 1107 ladd(fn)
1109 1108 elif st.st_mtime == lastnormaltime:
1110 1109 # fn may have just been marked as normal and it may have
1111 1110 # changed in the same second without changing its size.
1112 1111 # This can happen if we quickly do multiple commits.
1113 1112 # Force lookup, so we don't miss such a racy file change.
1114 1113 ladd(fn)
1115 1114 elif listclean:
1116 1115 cadd(fn)
1117 1116 elif state == 'm':
1118 1117 madd(fn)
1119 1118 elif state == 'a':
1120 1119 aadd(fn)
1121 1120 elif state == 'r':
1122 1121 radd(fn)
1123 1122
1124 1123 return (lookup, scmutil.status(modified, added, removed, deleted,
1125 1124 unknown, ignored, clean))
1126 1125
1127 1126 def matches(self, match):
1128 1127 '''
1129 1128 return files in the dirstate (in whatever state) filtered by match
1130 1129 '''
1131 1130 dmap = self._map
1132 1131 if match.always():
1133 1132 return dmap.keys()
1134 1133 files = match.files()
1135 1134 if match.isexact():
1136 1135 # fast path -- filter the other way around, since typically files is
1137 1136 # much smaller than dmap
1138 1137 return [f for f in files if f in dmap]
1139 1138 if match.prefix() and all(fn in dmap for fn in files):
1140 1139 # fast path -- all the values are known to be files, so just return
1141 1140 # that
1142 1141 return list(files)
1143 1142 return [f for f in dmap if match(f)]
1144 1143
1145 1144 def _actualfilename(self, tr):
1146 1145 if tr:
1147 1146 return self._pendingfilename
1148 1147 else:
1149 1148 return self._filename
1150 1149
1151 1150 def savebackup(self, tr, backupname):
1152 1151 '''Save current dirstate into backup file'''
1153 1152 filename = self._actualfilename(tr)
1154 1153 assert backupname != filename
1155 1154
1156 1155 # use '_writedirstate' instead of 'write' to make sure changes are
1157 1156 # written out, because the latter skips writing if a transaction is running.
1158 1157 # output file will be used to create backup of dirstate at this point.
1159 1158 if self._dirty or not self._opener.exists(filename):
1160 1159 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1161 1160 checkambig=True))
1162 1161
1163 1162 if tr:
1164 1163 # ensure that subsequent tr.writepending returns True for
1165 1164 # changes written out above, even if dirstate is never
1166 1165 # changed after this
1167 1166 tr.addfilegenerator('dirstate', (self._filename,),
1168 1167 self._writedirstate, location='plain')
1169 1168
1170 1169 # ensure that pending file written above is unlinked at
1171 1170 # failure, even if tr.writepending isn't invoked until the
1172 1171 # end of this transaction
1173 1172 tr.registertmp(filename, location='plain')
1174 1173
1175 1174 self._opener.tryunlink(backupname)
1176 1175 # hardlink backup is okay because _writedirstate is always called
1177 1176 # with an "atomictemp=True" file.
1178 1177 util.copyfile(self._opener.join(filename),
1179 1178 self._opener.join(backupname), hardlink=True)
1180 1179
1181 1180 def restorebackup(self, tr, backupname):
1182 1181 '''Restore dirstate by backup file'''
1183 1182 # this "invalidate()" prevents "wlock.release()" from writing
1184 1183 # changes of dirstate out after restoring from backup file
1185 1184 self.invalidate()
1186 1185 filename = self._actualfilename(tr)
1187 1186 o = self._opener
1188 1187 if util.samefile(o.join(backupname), o.join(filename)):
1189 1188 o.unlink(backupname)
1190 1189 else:
1191 1190 o.rename(backupname, filename, checkambig=True)
1192 1191
1193 1192 def clearbackup(self, tr, backupname):
1194 1193 '''Clear backup file'''
1195 1194 self._opener.unlink(backupname)
1196 1195
1197 1196 class dirstatemap(object):
1198 1197 """Map encapsulating the dirstate's contents.
1199 1198
1200 1199 The dirstate contains the following state:
1201 1200
1202 1201 - `identity` is the identity of the dirstate file, which can be used to
1203 1202 detect when changes have occurred to the dirstate file.
1204 1203
1205 1204 - `parents` is a pair containing the parents of the working copy. The
1206 1205 parents are updated by calling `setparents`.
1207 1206
1208 1207 - the state map maps filenames to tuples of (state, mode, size, mtime),
1209 1208 where state is a single character representing 'normal', 'added',
1210 'removed', or 'merged'. It is accessed by treating the dirstate as a
1211 dict.
1209 'removed', or 'merged'. It is read by treating the dirstate as a
1210 dict. File state is updated by calling the `addfile`, `removefile` and
1211 `dropfile` methods.
1212 1212
1213 1213 - `copymap` maps destination filenames to their source filename.
1214 1214
1215 1215 The dirstate also provides the following views onto the state:
1216 1216
1217 1217 - `nonnormalset` is a set of the filenames that have state other
1218 1218 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1219 1219
1220 1220 - `otherparentset` is a set of the filenames that are marked as coming
1221 1221 from the second parent when the dirstate is currently being merged.
1222 1222
1223 1223 - `dirs` is a set-like object containing all the directories that contain
1224 1224 files in the dirstate, excluding any files that are marked as removed.
1225 1225
1226 1226 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1227 1227 form that they appear as in the dirstate.
1228 1228
1229 1229 - `dirfoldmap` is a dict mapping normalized directory names to the
1230 1230 denormalized form that they appear as in the dirstate.
1231 1231
1232 1232 Once instantiated, the nonnormalset, otherparentset, dirs, filefoldmap and
1233 1233 dirfoldmap views must be maintained by the caller.
1234 1234 """
1235 1235
1236 1236 def __init__(self, ui, opener, root):
1237 1237 self._ui = ui
1238 1238 self._opener = opener
1239 1239 self._root = root
1240 1240 self._filename = 'dirstate'
1241 1241
1242 1242 self._parents = None
1243 1243 self._dirtyparents = False
1244 1244
1245 1245 # for consistent view between _pl() and _read() invocations
1246 1246 self._pendingmode = None
1247 1247
1248 1248 @propertycache
1249 1249 def _map(self):
1250 1250 self._map = {}
1251 1251 self.read()
1252 1252 return self._map
1253 1253
1254 1254 @propertycache
1255 1255 def copymap(self):
1256 1256 self.copymap = {}
1257 1257 self._map
1258 1258 return self.copymap
1259 1259
1260 1260 def clear(self):
1261 1261 self._map.clear()
1262 1262 self.copymap.clear()
1263 1263 self.setparents(nullid, nullid)
1264 1264 util.clearcachedproperty(self, "dirs")
1265 1265 util.clearcachedproperty(self, "filefoldmap")
1266 1266 util.clearcachedproperty(self, "dirfoldmap")
1267 1267 util.clearcachedproperty(self, "nonnormalset")
1268 1268 util.clearcachedproperty(self, "otherparentset")
1269 1269
1270 1270 def iteritems(self):
1271 1271 return self._map.iteritems()
1272 1272
1273 1273 def __len__(self):
1274 1274 return len(self._map)
1275 1275
1276 1276 def __iter__(self):
1277 1277 return iter(self._map)
1278 1278
1279 1279 def get(self, key, default=None):
1280 1280 return self._map.get(key, default)
1281 1281
1282 1282 def __contains__(self, key):
1283 1283 return key in self._map
1284 1284
1285 def __setitem__(self, key, value):
1286 self._map[key] = value
1287
1288 1285 def __getitem__(self, key):
1289 1286 return self._map[key]
1290 1287
1291 def __delitem__(self, key):
1292 del self._map[key]
1293
1294 1288 def keys(self):
1295 1289 return self._map.keys()
1296 1290
1297 1291 def preload(self):
1298 1292 """Loads the underlying data, if it's not already loaded"""
1299 1293 self._map
1300 1294
1295 def addfile(self, f, state, mode, size, mtime):
1296 """Add a tracked file to the dirstate."""
1297 self._map[f] = dirstatetuple(state, mode, size, mtime)
1298
1299 def removefile(self, f, size):
1300 """
1301 Mark a file as removed in the dirstate.
1302
1303 The `size` parameter is used to store sentinel values that indicate
1304 the file's previous state. In the future, we should refactor this
1305 to be more explicit about what that state is.
1306 """
1307 self._map[f] = dirstatetuple('r', 0, size, 0)
1308
1309 def dropfile(self, f):
1310 """
1311 Remove a file from the dirstate. Returns True if the file was
1312 previously recorded.
1313 """
1314 return self._map.pop(f, None) is not None
1315
1301 1316 def nonnormalentries(self):
1302 1317 '''Compute the nonnormal dirstate entries from the dmap'''
1303 1318 try:
1304 1319 return parsers.nonnormalotherparententries(self._map)
1305 1320 except AttributeError:
1306 1321 nonnorm = set()
1307 1322 otherparent = set()
1308 1323 for fname, e in self._map.iteritems():
1309 1324 if e[0] != 'n' or e[3] == -1:
1310 1325 nonnorm.add(fname)
1311 1326 if e[0] == 'n' and e[2] == -2:
1312 1327 otherparent.add(fname)
1313 1328 return nonnorm, otherparent
1314 1329
1315 1330 @propertycache
1316 1331 def filefoldmap(self):
1317 1332 """Returns a dictionary mapping normalized case paths to their
1318 1333 non-normalized versions.
1319 1334 """
1320 1335 try:
1321 1336 makefilefoldmap = parsers.make_file_foldmap
1322 1337 except AttributeError:
1323 1338 pass
1324 1339 else:
1325 1340 return makefilefoldmap(self._map, util.normcasespec,
1326 1341 util.normcasefallback)
1327 1342
1328 1343 f = {}
1329 1344 normcase = util.normcase
1330 1345 for name, s in self._map.iteritems():
1331 1346 if s[0] != 'r':
1332 1347 f[normcase(name)] = name
1333 1348 f['.'] = '.' # prevents useless util.fspath() invocation
1334 1349 return f
1335 1350
1336 1351 @propertycache
1337 1352 def dirs(self):
1338 1353 """Returns a set-like object containing all the directories in the
1339 1354 current dirstate.
1340 1355 """
1341 1356 return util.dirs(self._map, 'r')
1342 1357
1343 1358 def _opendirstatefile(self):
1344 1359 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1345 1360 if self._pendingmode is not None and self._pendingmode != mode:
1346 1361 fp.close()
1347 1362 raise error.Abort(_('working directory state may be '
1348 1363 'changed in parallel'))
1349 1364 self._pendingmode = mode
1350 1365 return fp
1351 1366
1352 1367 def parents(self):
1353 1368 if not self._parents:
1354 1369 try:
1355 1370 fp = self._opendirstatefile()
1356 1371 st = fp.read(40)
1357 1372 fp.close()
1358 1373 except IOError as err:
1359 1374 if err.errno != errno.ENOENT:
1360 1375 raise
1361 1376 # File doesn't exist, so the current state is empty
1362 1377 st = ''
1363 1378
1364 1379 l = len(st)
1365 1380 if l == 40:
1366 1381 self._parents = st[:20], st[20:40]
1367 1382 elif l == 0:
1368 1383 self._parents = [nullid, nullid]
1369 1384 else:
1370 1385 raise error.Abort(_('working directory state appears '
1371 1386 'damaged!'))
1372 1387
1373 1388 return self._parents
1374 1389
1375 1390 def setparents(self, p1, p2):
1376 1391 self._parents = (p1, p2)
1377 1392 self._dirtyparents = True
1378 1393
1379 1394 def read(self):
1380 1395 # ignore HG_PENDING because identity is used only for writing
1381 1396 self.identity = util.filestat.frompath(
1382 1397 self._opener.join(self._filename))
1383 1398
1384 1399 try:
1385 1400 fp = self._opendirstatefile()
1386 1401 try:
1387 1402 st = fp.read()
1388 1403 finally:
1389 1404 fp.close()
1390 1405 except IOError as err:
1391 1406 if err.errno != errno.ENOENT:
1392 1407 raise
1393 1408 return
1394 1409 if not st:
1395 1410 return
1396 1411
1397 1412 if util.safehasattr(parsers, 'dict_new_presized'):
1398 1413 # Make an estimate of the number of files in the dirstate based on
1399 1414 # its size. From a linear regression on a set of real-world repos,
1400 1415 # all over 10,000 files, the size of a dirstate entry is 85
1401 1416 # bytes. The cost of resizing is significantly higher than the cost
1402 1417 # of filling in a larger presized dict, so subtract 20% from the
1403 1418 # size.
1404 1419 #
1405 1420 # This heuristic is imperfect in many ways, so in a future dirstate
1406 1421 # format update it makes sense to just record the number of entries
1407 1422 # on write.
1408 1423 self._map = parsers.dict_new_presized(len(st) / 71)
1409 1424
1410 1425 # Python's garbage collector triggers a GC each time a certain number
1411 1426 # of container objects (the number being defined by
1412 1427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1413 1428 # for each file in the dirstate. The C version then immediately marks
1414 1429 # them as not to be tracked by the collector. However, this has no
1415 1430 # effect on when GCs are triggered, only on what objects the GC looks
1416 1431 # into. This means that O(number of files) GCs are unavoidable.
1417 1432 # Depending on when in the process's lifetime the dirstate is parsed,
1418 1433 # this can get very expensive. As a workaround, disable GC while
1419 1434 # parsing the dirstate.
1420 1435 #
1421 1436 # (we cannot decorate the function directly since it is in a C module)
1422 1437 parse_dirstate = util.nogc(parsers.parse_dirstate)
1423 1438 p = parse_dirstate(self._map, self.copymap, st)
1424 1439 if not self._dirtyparents:
1425 1440 self.setparents(*p)
1426 1441
1427 1442 # Avoid excess attribute lookups by fast pathing certain checks
1428 1443 self.__contains__ = self._map.__contains__
1429 1444 self.__getitem__ = self._map.__getitem__
1430 self.__setitem__ = self._map.__setitem__
1431 self.__delitem__ = self._map.__delitem__
1432 1445 self.get = self._map.get
1433 1446
1434 1447 def write(self, st, now):
1435 1448 st.write(parsers.pack_dirstate(self._map, self.copymap,
1436 1449 self.parents(), now))
1437 1450 st.close()
1438 1451 self._dirtyparents = False
1439 1452 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1440 1453
1441 1454 @propertycache
1442 1455 def nonnormalset(self):
1443 1456 nonnorm, otherparents = self.nonnormalentries()
1444 1457 self.otherparentset = otherparents
1445 1458 return nonnorm
1446 1459
1447 1460 @propertycache
1448 1461 def otherparentset(self):
1449 1462 nonnorm, otherparents = self.nonnormalentries()
1450 1463 self.nonnormalset = nonnorm
1451 1464 return otherparents
1452 1465
1453 1466 @propertycache
1454 1467 def identity(self):
1455 1468 self._map
1456 1469 return self.identity
1457 1470
1458 1471 @propertycache
1459 1472 def dirfoldmap(self):
1460 1473 f = {}
1461 1474 normcase = util.normcase
1462 1475 for name in self.dirs:
1463 1476 f[normcase(name)] = name
1464 1477 return f
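
To close, a hedged sketch of the caller-side pattern enabled by dropfile()'s boolean return, modeled on the dirstate.drop() hunk above. fakemap is an invented stand-in, not the real dirstatemap.

class fakemap(object):
    """Stand-in exposing only the dropfile() contract of the patched map."""
    def __init__(self, entries):
        self._map = dict(entries)

    def dropfile(self, f):
        # True only when the file was actually tracked, so callers can skip
        # dirty-flag and cache maintenance for no-op drops.
        return self._map.pop(f, None) is not None

dmap = fakemap({'a.txt': ('n', 0o644, 12, 0)})
dirty = False
if dmap.dropfile('a.txt'):   # membership test and deletion in a single call
    dirty = True
assert dirty and not dmap.dropfile('a.txt')   # a second drop is a no-op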