dirstate: move clear onto dirstatemap class...
Durham Goode
r34934:0217f75b stable
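The change below is small: dirstate.clear() stops rebuilding its map inline and instead delegates to a new clear() method on the dirstatemap class, presumably so later patches can change the storage layer without touching dirstate callers. The following is a minimal, self-contained sketch of that before/after shape, not the Mercurial source; the class bodies are heavily trimmed and nullid is stubbed in place of mercurial.node.nullid.

nullid = b"\x00" * 20  # stand-in for mercurial.node.nullid


class dirstatemap(object):
    def __init__(self):
        self._map = {}
        self.copymap = {}
        self._parents = None
        self._dirtyparents = False

    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def clear(self):
        # new in this changeset: entries, copies and parents reset in one place
        self._map = {}
        self.copymap = {}
        self.setparents(nullid, nullid)


class dirstate(object):
    def __init__(self):
        self._map = dirstatemap()
        self._lastnormaltime = 0
        self._updatedfiles = set()
        self._dirty = False

    def clear(self):
        # before: self._map = dirstatemap(...); self._map.setparents(nullid, nullid)
        self._map.clear()  # after: delegate storage details to the map object
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True


if __name__ == '__main__':
    ds = dirstate()
    ds.clear()
    assert ds._map._parents == (nullid, nullid)
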
@@ -1,1396 +1,1400
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
71 71 # a UNC path pointing to the root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._lastnormaltime = 0
75 75 self._ui = ui
76 76 self._filecache = {}
77 77 self._parentwriters = 0
78 78 self._filename = 'dirstate'
79 79 self._pendingfilename = '%s.pending' % self._filename
80 80 self._plchangecallbacks = {}
81 81 self._origpl = None
82 82 self._updatedfiles = set()
83 83
84 84 @contextlib.contextmanager
85 85 def parentchange(self):
86 86 '''Context manager for handling dirstate parents.
87 87
88 88 If an exception occurs in the scope of the context manager,
89 89 the incoherent dirstate won't be written when wlock is
90 90 released.
91 91 '''
92 92 self._parentwriters += 1
93 93 yield
94 94 # Typically we want the "undo" step of a context manager in a
95 95 # finally block so it happens even when an exception
96 96 # occurs. In this case, however, we only want to decrement
97 97 # parentwriters if the code in the with statement exits
98 98 # normally, so we don't have a try/finally here on purpose.
99 99 self._parentwriters -= 1
100 100
101 101 def beginparentchange(self):
102 102 '''Marks the beginning of a set of changes that involve changing
103 103 the dirstate parents. If there is an exception during this time,
104 104 the dirstate will not be written when the wlock is released. This
105 105 prevents writing an incoherent dirstate where the parent doesn't
106 106 match the contents.
107 107 '''
108 108 self._ui.deprecwarn('beginparentchange is obsoleted by the '
109 109 'parentchange context manager.', '4.3')
110 110 self._parentwriters += 1
111 111
112 112 def endparentchange(self):
113 113 '''Marks the end of a set of changes that involve changing the
114 114 dirstate parents. Once all parent changes have been marked done,
115 115 the wlock will be free to write the dirstate on release.
116 116 '''
117 117 self._ui.deprecwarn('endparentchange is obsoleted by the '
118 118 'parentchange context manager.', '4.3')
119 119 if self._parentwriters > 0:
120 120 self._parentwriters -= 1
121 121
122 122 def pendingparentchange(self):
123 123 '''Returns true if the dirstate is in the middle of a set of changes
124 124 that modify the dirstate parent.
125 125 '''
126 126 return self._parentwriters > 0
127 127
128 128 @propertycache
129 129 def _map(self):
130 130 '''Return the dirstate contents as a map from filename to
131 131 (state, mode, size, time).'''
132 132 self._read()
133 133 return self._map
134 134
135 135 @property
136 136 def _sparsematcher(self):
137 137 """The matcher for the sparse checkout.
138 138
139 139 The working directory may not include every file from a manifest. The
140 140 matcher obtained by this property will match a path if it is to be
141 141 included in the working directory.
142 142 """
143 143 # TODO there is potential to cache this property. For now, the matcher
144 144 # is resolved on every access. (But the called function does use a
145 145 # cache to keep the lookup fast.)
146 146 return self._sparsematchfn()
147 147
148 148 @repocache('branch')
149 149 def _branch(self):
150 150 try:
151 151 return self._opener.read("branch").strip() or "default"
152 152 except IOError as inst:
153 153 if inst.errno != errno.ENOENT:
154 154 raise
155 155 return "default"
156 156
157 157 @property
158 158 def _pl(self):
159 159 return self._map.parents()
160 160
161 161 def dirs(self):
162 162 return self._map.dirs
163 163
164 164 @rootcache('.hgignore')
165 165 def _ignore(self):
166 166 files = self._ignorefiles()
167 167 if not files:
168 168 return matchmod.never(self._root, '')
169 169
170 170 pats = ['include:%s' % f for f in files]
171 171 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
172 172
173 173 @propertycache
174 174 def _slash(self):
175 175 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
176 176
177 177 @propertycache
178 178 def _checklink(self):
179 179 return util.checklink(self._root)
180 180
181 181 @propertycache
182 182 def _checkexec(self):
183 183 return util.checkexec(self._root)
184 184
185 185 @propertycache
186 186 def _checkcase(self):
187 187 return not util.fscasesensitive(self._join('.hg'))
188 188
189 189 def _join(self, f):
190 190 # much faster than os.path.join()
191 191 # it's safe because f is always a relative path
192 192 return self._rootdir + f
193 193
194 194 def flagfunc(self, buildfallback):
195 195 if self._checklink and self._checkexec:
196 196 def f(x):
197 197 try:
198 198 st = os.lstat(self._join(x))
199 199 if util.statislink(st):
200 200 return 'l'
201 201 if util.statisexec(st):
202 202 return 'x'
203 203 except OSError:
204 204 pass
205 205 return ''
206 206 return f
207 207
208 208 fallback = buildfallback()
209 209 if self._checklink:
210 210 def f(x):
211 211 if os.path.islink(self._join(x)):
212 212 return 'l'
213 213 if 'x' in fallback(x):
214 214 return 'x'
215 215 return ''
216 216 return f
217 217 if self._checkexec:
218 218 def f(x):
219 219 if 'l' in fallback(x):
220 220 return 'l'
221 221 if util.isexec(self._join(x)):
222 222 return 'x'
223 223 return ''
224 224 return f
225 225 else:
226 226 return fallback
227 227
228 228 @propertycache
229 229 def _cwd(self):
230 230 # internal config: ui.forcecwd
231 231 forcecwd = self._ui.config('ui', 'forcecwd')
232 232 if forcecwd:
233 233 return forcecwd
234 234 return pycompat.getcwd()
235 235
236 236 def getcwd(self):
237 237 '''Return the path from which a canonical path is calculated.
238 238
239 239 This path should be used to resolve file patterns or to convert
240 240 canonical paths back to file paths for display. It shouldn't be
241 241 used to get real file paths. Use vfs functions instead.
242 242 '''
243 243 cwd = self._cwd
244 244 if cwd == self._root:
245 245 return ''
246 246 # self._root ends with a path separator if self._root is '/' or 'C:\'
247 247 rootsep = self._root
248 248 if not util.endswithsep(rootsep):
249 249 rootsep += pycompat.ossep
250 250 if cwd.startswith(rootsep):
251 251 return cwd[len(rootsep):]
252 252 else:
253 253 # we're outside the repo. return an absolute path.
254 254 return cwd
255 255
256 256 def pathto(self, f, cwd=None):
257 257 if cwd is None:
258 258 cwd = self.getcwd()
259 259 path = util.pathto(self._root, cwd, f)
260 260 if self._slash:
261 261 return util.pconvert(path)
262 262 return path
263 263
264 264 def __getitem__(self, key):
265 265 '''Return the current state of key (a filename) in the dirstate.
266 266
267 267 States are:
268 268 n normal
269 269 m needs merging
270 270 r marked for removal
271 271 a marked for addition
272 272 ? not tracked
273 273 '''
274 274 return self._map.get(key, ("?",))[0]
275 275
276 276 def __contains__(self, key):
277 277 return key in self._map
278 278
279 279 def __iter__(self):
280 280 return iter(sorted(self._map))
281 281
282 282 def items(self):
283 283 return self._map.iteritems()
284 284
285 285 iteritems = items
286 286
287 287 def parents(self):
288 288 return [self._validate(p) for p in self._pl]
289 289
290 290 def p1(self):
291 291 return self._validate(self._pl[0])
292 292
293 293 def p2(self):
294 294 return self._validate(self._pl[1])
295 295
296 296 def branch(self):
297 297 return encoding.tolocal(self._branch)
298 298
299 299 def setparents(self, p1, p2=nullid):
300 300 """Set dirstate parents to p1 and p2.
301 301
302 302 When moving from two parents to one, 'm' merged entries are
303 303 adjusted to normal and previous copy records discarded and
304 304 returned by the call.
305 305
306 306 See localrepo.setparents()
307 307 """
308 308 if self._parentwriters == 0:
309 309 raise ValueError("cannot set dirstate parent without "
310 310 "calling dirstate.beginparentchange")
311 311
312 312 self._dirty = True
313 313 oldp2 = self._pl[1]
314 314 if self._origpl is None:
315 315 self._origpl = self._pl
316 316 self._map.setparents(p1, p2)
317 317 copies = {}
318 318 if oldp2 != nullid and p2 == nullid:
319 319 candidatefiles = self._map.nonnormalset.union(
320 320 self._map.otherparentset)
321 321 for f in candidatefiles:
322 322 s = self._map.get(f)
323 323 if s is None:
324 324 continue
325 325
326 326 # Discard 'm' markers when moving away from a merge state
327 327 if s[0] == 'm':
328 328 source = self._map.copymap.get(f)
329 329 if source:
330 330 copies[f] = source
331 331 self.normallookup(f)
332 332 # Also fix up otherparent markers
333 333 elif s[0] == 'n' and s[2] == -2:
334 334 source = self._map.copymap.get(f)
335 335 if source:
336 336 copies[f] = source
337 337 self.add(f)
338 338 return copies
339 339
340 340 def setbranch(self, branch):
341 341 self._branch = encoding.fromlocal(branch)
342 342 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
343 343 try:
344 344 f.write(self._branch + '\n')
345 345 f.close()
346 346
347 347 # make sure filecache has the correct stat info for _branch after
348 348 # replacing the underlying file
349 349 ce = self._filecache['_branch']
350 350 if ce:
351 351 ce.refresh()
352 352 except: # re-raises
353 353 f.discard()
354 354 raise
355 355
356 356 def _read(self):
357 357 self._map = dirstatemap(self._ui, self._opener, self._root)
358 358 self._map.read()
359 359
360 360 def invalidate(self):
361 361 '''Causes the next access to reread the dirstate.
362 362
363 363 This is different from localrepo.invalidatedirstate() because it always
364 364 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
365 365 check whether the dirstate has changed before rereading it.'''
366 366
367 367 for a in ("_map", "_branch", "_ignore"):
368 368 if a in self.__dict__:
369 369 delattr(self, a)
370 370 self._lastnormaltime = 0
371 371 self._dirty = False
372 372 self._updatedfiles.clear()
373 373 self._parentwriters = 0
374 374 self._origpl = None
375 375
376 376 def copy(self, source, dest):
377 377 """Mark dest as a copy of source. Unmark dest if source is None."""
378 378 if source == dest:
379 379 return
380 380 self._dirty = True
381 381 if source is not None:
382 382 self._map.copymap[dest] = source
383 383 self._updatedfiles.add(source)
384 384 self._updatedfiles.add(dest)
385 385 elif self._map.copymap.pop(dest, None):
386 386 self._updatedfiles.add(dest)
387 387
388 388 def copied(self, file):
389 389 return self._map.copymap.get(file, None)
390 390
391 391 def copies(self):
392 392 return self._map.copymap
393 393
394 394 def _droppath(self, f):
395 395 if self[f] not in "?r" and "dirs" in self._map.__dict__:
396 396 self._map.dirs.delpath(f)
397 397
398 398 if "filefoldmap" in self._map.__dict__:
399 399 normed = util.normcase(f)
400 400 if normed in self._map.filefoldmap:
401 401 del self._map.filefoldmap[normed]
402 402
403 403 self._updatedfiles.add(f)
404 404
405 405 def _addpath(self, f, state, mode, size, mtime):
406 406 oldstate = self[f]
407 407 if state == 'a' or oldstate == 'r':
408 408 scmutil.checkfilename(f)
409 409 if f in self._map.dirs:
410 410 raise error.Abort(_('directory %r already in dirstate') % f)
411 411 # shadows
412 412 for d in util.finddirs(f):
413 413 if d in self._map.dirs:
414 414 break
415 415 entry = self._map.get(d)
416 416 if entry is not None and entry[0] != 'r':
417 417 raise error.Abort(
418 418 _('file %r in dirstate clashes with %r') % (d, f))
419 419 if oldstate in "?r" and "dirs" in self._map.__dict__:
420 420 self._map.dirs.addpath(f)
421 421 self._dirty = True
422 422 self._updatedfiles.add(f)
423 423 self._map[f] = dirstatetuple(state, mode, size, mtime)
424 424 if state != 'n' or mtime == -1:
425 425 self._map.nonnormalset.add(f)
426 426 if size == -2:
427 427 self._map.otherparentset.add(f)
428 428
429 429 def normal(self, f):
430 430 '''Mark a file normal and clean.'''
431 431 s = os.lstat(self._join(f))
432 432 mtime = s.st_mtime
433 433 self._addpath(f, 'n', s.st_mode,
434 434 s.st_size & _rangemask, mtime & _rangemask)
435 435 self._map.copymap.pop(f, None)
436 436 if f in self._map.nonnormalset:
437 437 self._map.nonnormalset.remove(f)
438 438 if mtime > self._lastnormaltime:
439 439 # Remember the most recent modification timeslot for status(),
440 440 # to make sure we won't miss future size-preserving file content
441 441 # modifications that happen within the same timeslot.
442 442 self._lastnormaltime = mtime
443 443
444 444 def normallookup(self, f):
445 445 '''Mark a file normal, but possibly dirty.'''
446 446 if self._pl[1] != nullid:
447 447 # if there is a merge going on and the file was either
448 448 # in state 'm' (-1) or coming from other parent (-2) before
449 449 # being removed, restore that state.
450 450 entry = self._map.get(f)
451 451 if entry is not None:
452 452 if entry[0] == 'r' and entry[2] in (-1, -2):
453 453 source = self._map.copymap.get(f)
454 454 if entry[2] == -1:
455 455 self.merge(f)
456 456 elif entry[2] == -2:
457 457 self.otherparent(f)
458 458 if source:
459 459 self.copy(source, f)
460 460 return
461 461 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
462 462 return
463 463 self._addpath(f, 'n', 0, -1, -1)
464 464 self._map.copymap.pop(f, None)
465 465 if f in self._map.nonnormalset:
466 466 self._map.nonnormalset.remove(f)
467 467
468 468 def otherparent(self, f):
469 469 '''Mark as coming from the other parent, always dirty.'''
470 470 if self._pl[1] == nullid:
471 471 raise error.Abort(_("setting %r to other parent "
472 472 "only allowed in merges") % f)
473 473 if f in self and self[f] == 'n':
474 474 # merge-like
475 475 self._addpath(f, 'm', 0, -2, -1)
476 476 else:
477 477 # add-like
478 478 self._addpath(f, 'n', 0, -2, -1)
479 479 self._map.copymap.pop(f, None)
480 480
481 481 def add(self, f):
482 482 '''Mark a file added.'''
483 483 self._addpath(f, 'a', 0, -1, -1)
484 484 self._map.copymap.pop(f, None)
485 485
486 486 def remove(self, f):
487 487 '''Mark a file removed.'''
488 488 self._dirty = True
489 489 self._droppath(f)
490 490 size = 0
491 491 if self._pl[1] != nullid:
492 492 entry = self._map.get(f)
493 493 if entry is not None:
494 494 # backup the previous state
495 495 if entry[0] == 'm': # merge
496 496 size = -1
497 497 elif entry[0] == 'n' and entry[2] == -2: # other parent
498 498 size = -2
499 499 self._map.otherparentset.add(f)
500 500 self._map[f] = dirstatetuple('r', 0, size, 0)
501 501 self._map.nonnormalset.add(f)
502 502 if size == 0:
503 503 self._map.copymap.pop(f, None)
504 504
505 505 def merge(self, f):
506 506 '''Mark a file merged.'''
507 507 if self._pl[1] == nullid:
508 508 return self.normallookup(f)
509 509 return self.otherparent(f)
510 510
511 511 def drop(self, f):
512 512 '''Drop a file from the dirstate'''
513 513 if f in self._map:
514 514 self._dirty = True
515 515 self._droppath(f)
516 516 del self._map[f]
517 517 if f in self._map.nonnormalset:
518 518 self._map.nonnormalset.remove(f)
519 519 self._map.copymap.pop(f, None)
520 520
521 521 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
522 522 if exists is None:
523 523 exists = os.path.lexists(os.path.join(self._root, path))
524 524 if not exists:
525 525 # Maybe a path component exists
526 526 if not ignoremissing and '/' in path:
527 527 d, f = path.rsplit('/', 1)
528 528 d = self._normalize(d, False, ignoremissing, None)
529 529 folded = d + "/" + f
530 530 else:
531 531 # No path components, preserve original case
532 532 folded = path
533 533 else:
534 534 # recursively normalize leading directory components
535 535 # against dirstate
536 536 if '/' in normed:
537 537 d, f = normed.rsplit('/', 1)
538 538 d = self._normalize(d, False, ignoremissing, True)
539 539 r = self._root + "/" + d
540 540 folded = d + "/" + util.fspath(f, r)
541 541 else:
542 542 folded = util.fspath(normed, self._root)
543 543 storemap[normed] = folded
544 544
545 545 return folded
546 546
547 547 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
548 548 normed = util.normcase(path)
549 549 folded = self._map.filefoldmap.get(normed, None)
550 550 if folded is None:
551 551 if isknown:
552 552 folded = path
553 553 else:
554 554 folded = self._discoverpath(path, normed, ignoremissing, exists,
555 555 self._map.filefoldmap)
556 556 return folded
557 557
558 558 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
559 559 normed = util.normcase(path)
560 560 folded = self._map.filefoldmap.get(normed, None)
561 561 if folded is None:
562 562 folded = self._map.dirfoldmap.get(normed, None)
563 563 if folded is None:
564 564 if isknown:
565 565 folded = path
566 566 else:
567 567 # store discovered result in dirfoldmap so that future
568 568 # normalizefile calls don't start matching directories
569 569 folded = self._discoverpath(path, normed, ignoremissing, exists,
570 570 self._map.dirfoldmap)
571 571 return folded
572 572
573 573 def normalize(self, path, isknown=False, ignoremissing=False):
574 574 '''
575 575 normalize the case of a pathname when on a casefolding filesystem
576 576
577 577 isknown specifies whether the filename came from walking the
578 578 disk, to avoid extra filesystem access.
579 579
580 580 If ignoremissing is True, missing paths are returned
581 581 unchanged. Otherwise, we try harder to normalize possibly
582 582 existing path components.
583 583
584 584 The normalized case is determined based on the following precedence:
585 585
586 586 - version of name already stored in the dirstate
587 587 - version of name stored on disk
588 588 - version provided via command arguments
589 589 '''
590 590
591 591 if self._checkcase:
592 592 return self._normalize(path, isknown, ignoremissing)
593 593 return path
594 594
595 595 def clear(self):
596 self._map = dirstatemap(self._ui, self._opener, self._root)
597 self._map.setparents(nullid, nullid)
596 self._map.clear()
598 597 self._lastnormaltime = 0
599 598 self._updatedfiles.clear()
600 599 self._dirty = True
601 600
602 601 def rebuild(self, parent, allfiles, changedfiles=None):
603 602 if changedfiles is None:
604 603 # Rebuild entire dirstate
605 604 changedfiles = allfiles
606 605 lastnormaltime = self._lastnormaltime
607 606 self.clear()
608 607 self._lastnormaltime = lastnormaltime
609 608
610 609 if self._origpl is None:
611 610 self._origpl = self._pl
612 611 self._map.setparents(parent, nullid)
613 612 for f in changedfiles:
614 613 if f in allfiles:
615 614 self.normallookup(f)
616 615 else:
617 616 self.drop(f)
618 617
619 618 self._dirty = True
620 619
621 620 def identity(self):
622 621 '''Return identity of dirstate itself to detect changing in storage
623 622
624 623 If identity of previous dirstate is equal to this, writing
625 624 changes based on the former dirstate out can keep consistency.
626 625 '''
627 626 return self._map.identity
628 627
629 628 def write(self, tr):
630 629 if not self._dirty:
631 630 return
632 631
633 632 filename = self._filename
634 633 if tr:
635 634 # 'dirstate.write()' is not only for writing in-memory
636 635 # changes out, but also for dropping ambiguous timestamps;
637 636 # delayed writing re-raises the "ambiguous timestamp issue".
638 637 # See also the wiki page below for detail:
639 638 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
640 639
641 640 # emulate dropping timestamp in 'parsers.pack_dirstate'
642 641 now = _getfsnow(self._opener)
643 642 dmap = self._map
644 643 for f in self._updatedfiles:
645 644 e = dmap.get(f)
646 645 if e is not None and e[0] == 'n' and e[3] == now:
647 646 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
648 647 self._map.nonnormalset.add(f)
649 648
650 649 # emulate that all 'dirstate.normal' results are written out
651 650 self._lastnormaltime = 0
652 651 self._updatedfiles.clear()
653 652
654 653 # delay writing in-memory changes out
655 654 tr.addfilegenerator('dirstate', (self._filename,),
656 655 self._writedirstate, location='plain')
657 656 return
658 657
659 658 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
660 659 self._writedirstate(st)
661 660
662 661 def addparentchangecallback(self, category, callback):
663 662 """add a callback to be called when the wd parents are changed
664 663
665 664 Callback will be called with the following arguments:
666 665 dirstate, (oldp1, oldp2), (newp1, newp2)
667 666
668 667 Category is a unique identifier to allow overwriting an old callback
669 668 with a newer callback.
670 669 """
671 670 self._plchangecallbacks[category] = callback
672 671
673 672 def _writedirstate(self, st):
674 673 # notify callbacks about parents change
675 674 if self._origpl is not None and self._origpl != self._pl:
676 675 for c, callback in sorted(self._plchangecallbacks.iteritems()):
677 676 callback(self, self._origpl, self._pl)
678 677 self._origpl = None
679 678 # use the modification time of the newly created temporary file as the
680 679 # filesystem's notion of 'now'
681 680 now = util.fstat(st).st_mtime & _rangemask
682 681
683 682 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
684 683 # the timestamps of entries in the dirstate, because of 'now > mtime'
685 684 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
686 685 if delaywrite > 0:
687 686 # do we have any files to delay for?
688 687 for f, e in self._map.iteritems():
689 688 if e[0] == 'n' and e[3] == now:
690 689 import time # to avoid useless import
691 690 # rather than sleep n seconds, sleep until the next
692 691 # multiple of n seconds
693 692 clock = time.time()
694 693 start = int(clock) - (int(clock) % delaywrite)
695 694 end = start + delaywrite
696 695 time.sleep(end - clock)
697 696 now = end # trust our estimate that the end is near now
698 697 break
699 698
700 699 self._map.write(st, now)
701 700 self._lastnormaltime = 0
702 701 self._dirty = False
703 702
704 703 def _dirignore(self, f):
705 704 if f == '.':
706 705 return False
707 706 if self._ignore(f):
708 707 return True
709 708 for p in util.finddirs(f):
710 709 if self._ignore(p):
711 710 return True
712 711 return False
713 712
714 713 def _ignorefiles(self):
715 714 files = []
716 715 if os.path.exists(self._join('.hgignore')):
717 716 files.append(self._join('.hgignore'))
718 717 for name, path in self._ui.configitems("ui"):
719 718 if name == 'ignore' or name.startswith('ignore.'):
720 719 # we need to use os.path.join here rather than self._join
721 720 # because path is arbitrary and user-specified
722 721 files.append(os.path.join(self._rootdir, util.expandpath(path)))
723 722 return files
724 723
725 724 def _ignorefileandline(self, f):
726 725 files = collections.deque(self._ignorefiles())
727 726 visited = set()
728 727 while files:
729 728 i = files.popleft()
730 729 patterns = matchmod.readpatternfile(i, self._ui.warn,
731 730 sourceinfo=True)
732 731 for pattern, lineno, line in patterns:
733 732 kind, p = matchmod._patsplit(pattern, 'glob')
734 733 if kind == "subinclude":
735 734 if p not in visited:
736 735 files.append(p)
737 736 continue
738 737 m = matchmod.match(self._root, '', [], [pattern],
739 738 warn=self._ui.warn)
740 739 if m(f):
741 740 return (i, lineno, line)
742 741 visited.add(i)
743 742 return (None, -1, "")
744 743
745 744 def _walkexplicit(self, match, subrepos):
746 745 '''Get stat data about the files explicitly specified by match.
747 746
748 747 Return a triple (results, dirsfound, dirsnotfound).
749 748 - results is a mapping from filename to stat result. It also contains
750 749 listings mapping subrepos and .hg to None.
751 750 - dirsfound is a list of files found to be directories.
752 751 - dirsnotfound is a list of files that the dirstate thinks are
753 752 directories and that were not found.'''
754 753
755 754 def badtype(mode):
756 755 kind = _('unknown')
757 756 if stat.S_ISCHR(mode):
758 757 kind = _('character device')
759 758 elif stat.S_ISBLK(mode):
760 759 kind = _('block device')
761 760 elif stat.S_ISFIFO(mode):
762 761 kind = _('fifo')
763 762 elif stat.S_ISSOCK(mode):
764 763 kind = _('socket')
765 764 elif stat.S_ISDIR(mode):
766 765 kind = _('directory')
767 766 return _('unsupported file type (type is %s)') % kind
768 767
769 768 matchedir = match.explicitdir
770 769 badfn = match.bad
771 770 dmap = self._map
772 771 lstat = os.lstat
773 772 getkind = stat.S_IFMT
774 773 dirkind = stat.S_IFDIR
775 774 regkind = stat.S_IFREG
776 775 lnkkind = stat.S_IFLNK
777 776 join = self._join
778 777 dirsfound = []
779 778 foundadd = dirsfound.append
780 779 dirsnotfound = []
781 780 notfoundadd = dirsnotfound.append
782 781
783 782 if not match.isexact() and self._checkcase:
784 783 normalize = self._normalize
785 784 else:
786 785 normalize = None
787 786
788 787 files = sorted(match.files())
789 788 subrepos.sort()
790 789 i, j = 0, 0
791 790 while i < len(files) and j < len(subrepos):
792 791 subpath = subrepos[j] + "/"
793 792 if files[i] < subpath:
794 793 i += 1
795 794 continue
796 795 while i < len(files) and files[i].startswith(subpath):
797 796 del files[i]
798 797 j += 1
799 798
800 799 if not files or '.' in files:
801 800 files = ['.']
802 801 results = dict.fromkeys(subrepos)
803 802 results['.hg'] = None
804 803
805 804 alldirs = None
806 805 for ff in files:
807 806 # constructing the foldmap is expensive, so don't do it for the
808 807 # common case where files is ['.']
809 808 if normalize and ff != '.':
810 809 nf = normalize(ff, False, True)
811 810 else:
812 811 nf = ff
813 812 if nf in results:
814 813 continue
815 814
816 815 try:
817 816 st = lstat(join(nf))
818 817 kind = getkind(st.st_mode)
819 818 if kind == dirkind:
820 819 if nf in dmap:
821 820 # file replaced by dir on disk but still in dirstate
822 821 results[nf] = None
823 822 if matchedir:
824 823 matchedir(nf)
825 824 foundadd((nf, ff))
826 825 elif kind == regkind or kind == lnkkind:
827 826 results[nf] = st
828 827 else:
829 828 badfn(ff, badtype(kind))
830 829 if nf in dmap:
831 830 results[nf] = None
832 831 except OSError as inst: # nf not found on disk - it is dirstate only
833 832 if nf in dmap: # does it exactly match a missing file?
834 833 results[nf] = None
835 834 else: # does it match a missing directory?
836 835 if alldirs is None:
837 836 alldirs = util.dirs(dmap._map)
838 837 if nf in alldirs:
839 838 if matchedir:
840 839 matchedir(nf)
841 840 notfoundadd(nf)
842 841 else:
843 842 badfn(ff, encoding.strtolocal(inst.strerror))
844 843
845 844 # Case insensitive filesystems cannot rely on lstat() failing to detect
846 845 # a case-only rename. Prune the stat object for any file that does not
847 846 # match the case in the filesystem, if there are multiple files that
848 847 # normalize to the same path.
849 848 if match.isexact() and self._checkcase:
850 849 normed = {}
851 850
852 851 for f, st in results.iteritems():
853 852 if st is None:
854 853 continue
855 854
856 855 nc = util.normcase(f)
857 856 paths = normed.get(nc)
858 857
859 858 if paths is None:
860 859 paths = set()
861 860 normed[nc] = paths
862 861
863 862 paths.add(f)
864 863
865 864 for norm, paths in normed.iteritems():
866 865 if len(paths) > 1:
867 866 for path in paths:
868 867 folded = self._discoverpath(path, norm, True, None,
869 868 self._map.dirfoldmap)
870 869 if path != folded:
871 870 results[path] = None
872 871
873 872 return results, dirsfound, dirsnotfound
874 873
875 874 def walk(self, match, subrepos, unknown, ignored, full=True):
876 875 '''
877 876 Walk recursively through the directory tree, finding all files
878 877 matched by match.
879 878
880 879 If full is False, maybe skip some known-clean files.
881 880
882 881 Return a dict mapping filename to stat-like object (either
883 882 mercurial.osutil.stat instance or return value of os.stat()).
884 883
885 884 '''
886 885 # full is a flag that extensions that hook into walk can use -- this
887 886 # implementation doesn't use it at all. This satisfies the contract
888 887 # because we only guarantee a "maybe".
889 888
890 889 if ignored:
891 890 ignore = util.never
892 891 dirignore = util.never
893 892 elif unknown:
894 893 ignore = self._ignore
895 894 dirignore = self._dirignore
896 895 else:
897 896 # if not unknown and not ignored, drop dir recursion and step 2
898 897 ignore = util.always
899 898 dirignore = util.always
900 899
901 900 matchfn = match.matchfn
902 901 matchalways = match.always()
903 902 matchtdir = match.traversedir
904 903 dmap = self._map
905 904 listdir = util.listdir
906 905 lstat = os.lstat
907 906 dirkind = stat.S_IFDIR
908 907 regkind = stat.S_IFREG
909 908 lnkkind = stat.S_IFLNK
910 909 join = self._join
911 910
912 911 exact = skipstep3 = False
913 912 if match.isexact(): # match.exact
914 913 exact = True
915 914 dirignore = util.always # skip step 2
916 915 elif match.prefix(): # match.match, no patterns
917 916 skipstep3 = True
918 917
919 918 if not exact and self._checkcase:
920 919 normalize = self._normalize
921 920 normalizefile = self._normalizefile
922 921 skipstep3 = False
923 922 else:
924 923 normalize = self._normalize
925 924 normalizefile = None
926 925
927 926 # step 1: find all explicit files
928 927 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
929 928
930 929 skipstep3 = skipstep3 and not (work or dirsnotfound)
931 930 work = [d for d in work if not dirignore(d[0])]
932 931
933 932 # step 2: visit subdirectories
934 933 def traverse(work, alreadynormed):
935 934 wadd = work.append
936 935 while work:
937 936 nd = work.pop()
938 937 if not match.visitdir(nd):
939 938 continue
940 939 skip = None
941 940 if nd == '.':
942 941 nd = ''
943 942 else:
944 943 skip = '.hg'
945 944 try:
946 945 entries = listdir(join(nd), stat=True, skip=skip)
947 946 except OSError as inst:
948 947 if inst.errno in (errno.EACCES, errno.ENOENT):
949 948 match.bad(self.pathto(nd),
950 949 encoding.strtolocal(inst.strerror))
951 950 continue
952 951 raise
953 952 for f, kind, st in entries:
954 953 if normalizefile:
955 954 # even though f might be a directory, we're only
956 955 # interested in comparing it to files currently in the
957 956 # dmap -- therefore normalizefile is enough
958 957 nf = normalizefile(nd and (nd + "/" + f) or f, True,
959 958 True)
960 959 else:
961 960 nf = nd and (nd + "/" + f) or f
962 961 if nf not in results:
963 962 if kind == dirkind:
964 963 if not ignore(nf):
965 964 if matchtdir:
966 965 matchtdir(nf)
967 966 wadd(nf)
968 967 if nf in dmap and (matchalways or matchfn(nf)):
969 968 results[nf] = None
970 969 elif kind == regkind or kind == lnkkind:
971 970 if nf in dmap:
972 971 if matchalways or matchfn(nf):
973 972 results[nf] = st
974 973 elif ((matchalways or matchfn(nf))
975 974 and not ignore(nf)):
976 975 # unknown file -- normalize if necessary
977 976 if not alreadynormed:
978 977 nf = normalize(nf, False, True)
979 978 results[nf] = st
980 979 elif nf in dmap and (matchalways or matchfn(nf)):
981 980 results[nf] = None
982 981
983 982 for nd, d in work:
984 983 # alreadynormed means that processwork doesn't have to do any
985 984 # expensive directory normalization
986 985 alreadynormed = not normalize or nd == d
987 986 traverse([d], alreadynormed)
988 987
989 988 for s in subrepos:
990 989 del results[s]
991 990 del results['.hg']
992 991
993 992 # step 3: visit remaining files from dmap
994 993 if not skipstep3 and not exact:
995 994 # If a dmap file is not in results yet, it was either
996 995 # a) not matching matchfn b) ignored, c) missing, or d) under a
997 996 # symlink directory.
998 997 if not results and matchalways:
999 998 visit = [f for f in dmap]
1000 999 else:
1001 1000 visit = [f for f in dmap if f not in results and matchfn(f)]
1002 1001 visit.sort()
1003 1002
1004 1003 if unknown:
1005 1004 # unknown == True means we walked all dirs under the roots
1006 1005 # that weren't ignored, and everything that matched was stat'ed
1007 1006 # and is already in results.
1008 1007 # The rest must thus be ignored or under a symlink.
1009 1008 audit_path = pathutil.pathauditor(self._root, cached=True)
1010 1009
1011 1010 for nf in iter(visit):
1012 1011 # If a stat for the same file was already added with a
1013 1012 # different case, don't add one for this, since that would
1014 1013 # make it appear as if the file exists under both names
1015 1014 # on disk.
1016 1015 if (normalizefile and
1017 1016 normalizefile(nf, True, True) in results):
1018 1017 results[nf] = None
1019 1018 # Report ignored items in the dmap as long as they are not
1020 1019 # under a symlink directory.
1021 1020 elif audit_path.check(nf):
1022 1021 try:
1023 1022 results[nf] = lstat(join(nf))
1024 1023 # file was just ignored, no links, and exists
1025 1024 except OSError:
1026 1025 # file doesn't exist
1027 1026 results[nf] = None
1028 1027 else:
1029 1028 # It's either missing or under a symlink directory
1030 1029 # which we in this case report as missing
1031 1030 results[nf] = None
1032 1031 else:
1033 1032 # We may not have walked the full directory tree above,
1034 1033 # so stat and check everything we missed.
1035 1034 iv = iter(visit)
1036 1035 for st in util.statfiles([join(i) for i in visit]):
1037 1036 results[next(iv)] = st
1038 1037 return results
1039 1038
1040 1039 def status(self, match, subrepos, ignored, clean, unknown):
1041 1040 '''Determine the status of the working copy relative to the
1042 1041 dirstate and return a pair of (unsure, status), where status is of type
1043 1042 scmutil.status and:
1044 1043
1045 1044 unsure:
1046 1045 files that might have been modified since the dirstate was
1047 1046 written, but need to be read to be sure (size is the same
1048 1047 but mtime differs)
1049 1048 status.modified:
1050 1049 files that have definitely been modified since the dirstate
1051 1050 was written (different size or mode)
1052 1051 status.clean:
1053 1052 files that have definitely not been modified since the
1054 1053 dirstate was written
1055 1054 '''
1056 1055 listignored, listclean, listunknown = ignored, clean, unknown
1057 1056 lookup, modified, added, unknown, ignored = [], [], [], [], []
1058 1057 removed, deleted, clean = [], [], []
1059 1058
1060 1059 dmap = self._map
1061 1060 ladd = lookup.append # aka "unsure"
1062 1061 madd = modified.append
1063 1062 aadd = added.append
1064 1063 uadd = unknown.append
1065 1064 iadd = ignored.append
1066 1065 radd = removed.append
1067 1066 dadd = deleted.append
1068 1067 cadd = clean.append
1069 1068 mexact = match.exact
1070 1069 dirignore = self._dirignore
1071 1070 checkexec = self._checkexec
1072 1071 copymap = self._map.copymap
1073 1072 lastnormaltime = self._lastnormaltime
1074 1073
1075 1074 # We need to do full walks when either
1076 1075 # - we're listing all clean files, or
1077 1076 # - match.traversedir does something, because match.traversedir should
1078 1077 # be called for every dir in the working dir
1079 1078 full = listclean or match.traversedir is not None
1080 1079 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1081 1080 full=full).iteritems():
1082 1081 if fn not in dmap:
1083 1082 if (listignored or mexact(fn)) and dirignore(fn):
1084 1083 if listignored:
1085 1084 iadd(fn)
1086 1085 else:
1087 1086 uadd(fn)
1088 1087 continue
1089 1088
1090 1089 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1091 1090 # written like that for performance reasons. dmap[fn] is not a
1092 1091 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1093 1092 # opcode has fast paths when the value to be unpacked is a tuple or
1094 1093 # a list, but falls back to creating a full-fledged iterator in
1095 1094 # general. That is much slower than simply accessing and storing the
1096 1095 # tuple members one by one.
1097 1096 t = dmap[fn]
1098 1097 state = t[0]
1099 1098 mode = t[1]
1100 1099 size = t[2]
1101 1100 time = t[3]
1102 1101
1103 1102 if not st and state in "nma":
1104 1103 dadd(fn)
1105 1104 elif state == 'n':
1106 1105 if (size >= 0 and
1107 1106 ((size != st.st_size and size != st.st_size & _rangemask)
1108 1107 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1109 1108 or size == -2 # other parent
1110 1109 or fn in copymap):
1111 1110 madd(fn)
1112 1111 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1113 1112 ladd(fn)
1114 1113 elif st.st_mtime == lastnormaltime:
1115 1114 # fn may have just been marked as normal and it may have
1116 1115 # changed in the same second without changing its size.
1117 1116 # This can happen if we quickly do multiple commits.
1118 1117 # Force lookup, so we don't miss such a racy file change.
1119 1118 ladd(fn)
1120 1119 elif listclean:
1121 1120 cadd(fn)
1122 1121 elif state == 'm':
1123 1122 madd(fn)
1124 1123 elif state == 'a':
1125 1124 aadd(fn)
1126 1125 elif state == 'r':
1127 1126 radd(fn)
1128 1127
1129 1128 return (lookup, scmutil.status(modified, added, removed, deleted,
1130 1129 unknown, ignored, clean))
1131 1130
1132 1131 def matches(self, match):
1133 1132 '''
1134 1133 return files in the dirstate (in whatever state) filtered by match
1135 1134 '''
1136 1135 dmap = self._map
1137 1136 if match.always():
1138 1137 return dmap.keys()
1139 1138 files = match.files()
1140 1139 if match.isexact():
1141 1140 # fast path -- filter the other way around, since typically files is
1142 1141 # much smaller than dmap
1143 1142 return [f for f in files if f in dmap]
1144 1143 if match.prefix() and all(fn in dmap for fn in files):
1145 1144 # fast path -- all the values are known to be files, so just return
1146 1145 # that
1147 1146 return list(files)
1148 1147 return [f for f in dmap if match(f)]
1149 1148
1150 1149 def _actualfilename(self, tr):
1151 1150 if tr:
1152 1151 return self._pendingfilename
1153 1152 else:
1154 1153 return self._filename
1155 1154
1156 1155 def savebackup(self, tr, backupname):
1157 1156 '''Save current dirstate into backup file'''
1158 1157 filename = self._actualfilename(tr)
1159 1158 assert backupname != filename
1160 1159
1161 1160 # use '_writedirstate' instead of 'write' to make sure changes are
1162 1161 # written out, because the latter skips writing while a transaction is
1163 1162 # running. The output file will be used to create a backup of the dirstate.
1164 1163 if self._dirty or not self._opener.exists(filename):
1165 1164 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1166 1165 checkambig=True))
1167 1166
1168 1167 if tr:
1169 1168 # ensure that subsequent tr.writepending returns True for
1170 1169 # changes written out above, even if dirstate is never
1171 1170 # changed after this
1172 1171 tr.addfilegenerator('dirstate', (self._filename,),
1173 1172 self._writedirstate, location='plain')
1174 1173
1175 1174 # ensure that pending file written above is unlinked at
1176 1175 # failure, even if tr.writepending isn't invoked until the
1177 1176 # end of this transaction
1178 1177 tr.registertmp(filename, location='plain')
1179 1178
1180 1179 self._opener.tryunlink(backupname)
1181 1180 # hardlink backup is okay because _writedirstate is always called
1182 1181 # with an "atomictemp=True" file.
1183 1182 util.copyfile(self._opener.join(filename),
1184 1183 self._opener.join(backupname), hardlink=True)
1185 1184
1186 1185 def restorebackup(self, tr, backupname):
1187 1186 '''Restore dirstate by backup file'''
1188 1187 # this "invalidate()" prevents "wlock.release()" from writing
1189 1188 # changes of dirstate out after restoring from backup file
1190 1189 self.invalidate()
1191 1190 filename = self._actualfilename(tr)
1192 1191 self._opener.rename(backupname, filename, checkambig=True)
1193 1192
1194 1193 def clearbackup(self, tr, backupname):
1195 1194 '''Clear backup file'''
1196 1195 self._opener.unlink(backupname)
1197 1196
1198 1197 class dirstatemap(object):
1199 1198 def __init__(self, ui, opener, root):
1200 1199 self._ui = ui
1201 1200 self._opener = opener
1202 1201 self._root = root
1203 1202 self._filename = 'dirstate'
1204 1203
1205 1204 self._map = {}
1206 1205 self.copymap = {}
1207 1206 self._parents = None
1208 1207 self._dirtyparents = False
1209 1208
1210 1209 # for consistent view between _pl() and _read() invocations
1211 1210 self._pendingmode = None
1212 1211
1212 def clear(self):
1213 self._map = {}
1214 self.copymap = {}
1215 self.setparents(nullid, nullid)
1216
1213 1217 def iteritems(self):
1214 1218 return self._map.iteritems()
1215 1219
1216 1220 def __len__(self):
1217 1221 return len(self._map)
1218 1222
1219 1223 def __iter__(self):
1220 1224 return iter(self._map)
1221 1225
1222 1226 def get(self, key, default=None):
1223 1227 return self._map.get(key, default)
1224 1228
1225 1229 def __contains__(self, key):
1226 1230 return key in self._map
1227 1231
1228 1232 def __setitem__(self, key, value):
1229 1233 self._map[key] = value
1230 1234
1231 1235 def __getitem__(self, key):
1232 1236 return self._map[key]
1233 1237
1234 1238 def __delitem__(self, key):
1235 1239 del self._map[key]
1236 1240
1237 1241 def keys(self):
1238 1242 return self._map.keys()
1239 1243
1240 1244 def nonnormalentries(self):
1241 1245 '''Compute the nonnormal dirstate entries from the dmap'''
1242 1246 try:
1243 1247 return parsers.nonnormalotherparententries(self._map)
1244 1248 except AttributeError:
1245 1249 nonnorm = set()
1246 1250 otherparent = set()
1247 1251 for fname, e in self._map.iteritems():
1248 1252 if e[0] != 'n' or e[3] == -1:
1249 1253 nonnorm.add(fname)
1250 1254 if e[0] == 'n' and e[2] == -2:
1251 1255 otherparent.add(fname)
1252 1256 return nonnorm, otherparent
1253 1257
1254 1258 @propertycache
1255 1259 def filefoldmap(self):
1256 1260 """Returns a dictionary mapping normalized case paths to their
1257 1261 non-normalized versions.
1258 1262 """
1259 1263 try:
1260 1264 makefilefoldmap = parsers.make_file_foldmap
1261 1265 except AttributeError:
1262 1266 pass
1263 1267 else:
1264 1268 return makefilefoldmap(self._map, util.normcasespec,
1265 1269 util.normcasefallback)
1266 1270
1267 1271 f = {}
1268 1272 normcase = util.normcase
1269 1273 for name, s in self._map.iteritems():
1270 1274 if s[0] != 'r':
1271 1275 f[normcase(name)] = name
1272 1276 f['.'] = '.' # prevents useless util.fspath() invocation
1273 1277 return f
1274 1278
1275 1279 @propertycache
1276 1280 def dirs(self):
1277 1281 """Returns a set-like object containing all the directories in the
1278 1282 current dirstate.
1279 1283 """
1280 1284 return util.dirs(self._map, 'r')
1281 1285
1282 1286 def _opendirstatefile(self):
1283 1287 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1284 1288 if self._pendingmode is not None and self._pendingmode != mode:
1285 1289 fp.close()
1286 1290 raise error.Abort(_('working directory state may be '
1287 1291 'changed parallelly'))
1288 1292 self._pendingmode = mode
1289 1293 return fp
1290 1294
1291 1295 def parents(self):
1292 1296 if not self._parents:
1293 1297 try:
1294 1298 fp = self._opendirstatefile()
1295 1299 st = fp.read(40)
1296 1300 fp.close()
1297 1301 except IOError as err:
1298 1302 if err.errno != errno.ENOENT:
1299 1303 raise
1300 1304 # File doesn't exist, so the current state is empty
1301 1305 st = ''
1302 1306
1303 1307 l = len(st)
1304 1308 if l == 40:
1305 1309 self._parents = st[:20], st[20:40]
1306 1310 elif l == 0:
1307 1311 self._parents = [nullid, nullid]
1308 1312 else:
1309 1313 raise error.Abort(_('working directory state appears '
1310 1314 'damaged!'))
1311 1315
1312 1316 return self._parents
1313 1317
1314 1318 def setparents(self, p1, p2):
1315 1319 self._parents = (p1, p2)
1316 1320 self._dirtyparents = True
1317 1321
1318 1322 def read(self):
1319 1323 # ignore HG_PENDING because identity is used only for writing
1320 1324 self.identity = util.filestat.frompath(
1321 1325 self._opener.join(self._filename))
1322 1326
1323 1327 try:
1324 1328 fp = self._opendirstatefile()
1325 1329 try:
1326 1330 st = fp.read()
1327 1331 finally:
1328 1332 fp.close()
1329 1333 except IOError as err:
1330 1334 if err.errno != errno.ENOENT:
1331 1335 raise
1332 1336 return
1333 1337 if not st:
1334 1338 return
1335 1339
1336 1340 if util.safehasattr(parsers, 'dict_new_presized'):
1337 1341 # Make an estimate of the number of files in the dirstate based on
1338 1342 # its size. From a linear regression on a set of real-world repos,
1339 1343 # all over 10,000 files, the size of a dirstate entry is 85
1340 1344 # bytes. The cost of resizing is significantly higher than the cost
1341 1345 # of filling in a larger presized dict, so subtract 20% from the
1342 1346 # size.
1343 1347 #
1344 1348 # This heuristic is imperfect in many ways, so in a future dirstate
1345 1349 # format update it makes sense to just record the number of entries
1346 1350 # on write.
1347 1351 self._map = parsers.dict_new_presized(len(st) / 71)
1348 1352
1349 1353 # Python's garbage collector triggers a GC each time a certain number
1350 1354 # of container objects (the number being defined by
1351 1355 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1352 1356 # for each file in the dirstate. The C version then immediately marks
1353 1357 # them as not to be tracked by the collector. However, this has no
1354 1358 # effect on when GCs are triggered, only on what objects the GC looks
1355 1359 # into. This means that O(number of files) GCs are unavoidable.
1356 1360 # Depending on when in the process's lifetime the dirstate is parsed,
1357 1361 # this can get very expensive. As a workaround, disable GC while
1358 1362 # parsing the dirstate.
1359 1363 #
1360 1364 # (we cannot decorate the function directly since it is in a C module)
1361 1365 parse_dirstate = util.nogc(parsers.parse_dirstate)
1362 1366 p = parse_dirstate(self._map, self.copymap, st)
1363 1367 if not self._dirtyparents:
1364 1368 self.setparents(*p)
1365 1369
1366 1370 def write(self, st, now):
1367 1371 st.write(parsers.pack_dirstate(self._map, self.copymap,
1368 1372 self.parents(), now))
1369 1373 st.close()
1370 1374 self._dirtyparents = False
1371 1375 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1372 1376
1373 1377 @propertycache
1374 1378 def nonnormalset(self):
1375 1379 nonnorm, otherparents = self.nonnormalentries()
1376 1380 self.otherparentset = otherparents
1377 1381 return nonnorm
1378 1382
1379 1383 @propertycache
1380 1384 def otherparentset(self):
1381 1385 nonnorm, otherparents = self.nonnormalentries()
1382 1386 self.nonnormalset = nonnorm
1383 1387 return otherparents
1384 1388
1385 1389 @propertycache
1386 1390 def identity(self):
1387 1391 self.read()
1388 1392 return self.identity
1389 1393
1390 1394 @propertycache
1391 1395 def dirfoldmap(self):
1392 1396 f = {}
1393 1397 normcase = util.normcase
1394 1398 for name in self.dirs:
1395 1399 f[normcase(name)] = name
1396 1400 return f
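
As a closing aside, the debug.dirstate.delaywrite handling in _writedirstate above does not sleep a fixed n seconds; it sleeps until the next multiple of n seconds, so the 'now' used for packing ends up safely past the mtimes just recorded. A tiny, hypothetical helper (not part of the changeset) showing that rounding:

def _delay_until_next_multiple(clock, delaywrite):
    """Seconds to sleep so we wake at the next multiple of 'delaywrite'."""
    start = int(clock) - (int(clock) % delaywrite)
    end = start + delaywrite
    return end - clock

# e.g. with delaywrite=2 and clock=1000.3 we sleep ~1.7s and wake at t=1002
assert abs(_delay_until_next_multiple(1000.3, 2) - 1.7) < 1e-9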