##// END OF EJS Templates
dirstate: don't remove normallookup files from nonnormalset...
Mark Thomas -
r35023:1664dc7c default
parent child Browse files
Show More
@@ -1,1429 +1,1427
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
# C (or pure-Python fallback) implementation of dirstate primitives,
# selected according to the module policy settings.
parsers = policy.importmod(r'parsers')

# Convenience aliases used throughout this module.
propertycache = util.propertycache
filecache = scmutil.filecache
# Mask that folds size/mtime values into the signed 31-bit range used by
# the on-disk dirstate format.
_rangemask = 0x7fffffff

# (state, mode, size, mtime) tuple type for dirstate entries.
dirstatetuple = parsers.dirstatetuple
37 37
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # resolve fname against the repository's .hg directory
        vfs = obj._opener
        return vfs.join(fname)
42 42
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # resolve fname against the working directory root
        joiner = obj._join
        return joiner(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True while in-memory state differs from what is on disk
        self._dirty = False
        # mtime of the most recently 'normal'-ed file, used by status()
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth counter of pending parentchange() scopes
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        # category -> callback invoked when the parents change
        self._plchangecallbacks = {}
        # parents as of the last write, or None if unchanged since then
        self._origpl = None
        # files touched since the last write, for timestamp fixups
        self._updatedfiles = set()
83 83
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        # a nonzero writer count blocks dirstate writes on wlock release
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
100 100
101 101 def beginparentchange(self):
102 102 '''Marks the beginning of a set of changes that involve changing
103 103 the dirstate parents. If there is an exception during this time,
104 104 the dirstate will not be written when the wlock is released. This
105 105 prevents writing an incoherent dirstate where the parent doesn't
106 106 match the contents.
107 107 '''
108 108 self._ui.deprecwarn('beginparentchange is obsoleted by the '
109 109 'parentchange context manager.', '4.3')
110 110 self._parentwriters += 1
111 111
112 112 def endparentchange(self):
113 113 '''Marks the end of a set of changes that involve changing the
114 114 dirstate parents. Once all parent changes have been marked done,
115 115 the wlock will be free to write the dirstate on release.
116 116 '''
117 117 self._ui.deprecwarn('endparentchange is obsoleted by the '
118 118 'parentchange context manager.', '4.3')
119 119 if self._parentwriters > 0:
120 120 self._parentwriters -= 1
121 121
    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        # nonzero while inside parentchange()/beginparentchange() scopes
        return self._parentwriters > 0
127 127
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # assigning to self._map replaces this propertycache entry, so the
        # map is built only once until invalidate() deletes the attribute
        self._map = dirstatemap(self._ui, self._opener, self._root)
        return self._map
134 134
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        # The function itself is injected at construction time (sparsematchfn).
        return self._sparsematchfn()
147 147
148 148 @repocache('branch')
149 149 def _branch(self):
150 150 try:
151 151 return self._opener.read("branch").strip() or "default"
152 152 except IOError as inst:
153 153 if inst.errno != errno.ENOENT:
154 154 raise
155 155 return "default"
156 156
    @property
    def _pl(self):
        # parent nodeids as recorded by the dirstate map
        return self._map.parents()
160 160
    def dirs(self):
        # expose the dirstate map's directory structure (built lazily there)
        return self._map.dirs
163 163
164 164 @rootcache('.hgignore')
165 165 def _ignore(self):
166 166 files = self._ignorefiles()
167 167 if not files:
168 168 return matchmod.never(self._root, '')
169 169
170 170 pats = ['include:%s' % f for f in files]
171 171 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
172 172
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' although the OS uses
        # a different separator
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
176 176
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)
180 180
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root honors the exec bit
        return util.checkexec(self._root)
184 184
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems; probed against '.hg'
        return not util.fscasesensitive(self._join('.hg'))
188 188
    def _join(self, f):
        """Return the absolute path of tracked (repo-relative) file f."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
193 193
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags: 'l', 'x'
        or ''.

        buildfallback() is called to obtain a manifest-based flag source
        when the filesystem cannot express one or both of the flags.
        """
        if self._checklink and self._checkexec:
            # the filesystem can express both flags: just lstat the file
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            # links from the filesystem, exec bit from the fallback
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # links from the fallback, exec bit from the filesystem
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # neither flag is supported: everything comes from the fallback
            return fallback
227 227
228 228 @propertycache
229 229 def _cwd(self):
230 230 # internal config: ui.forcecwd
231 231 forcecwd = self._ui.config('ui', 'forcecwd')
232 232 if forcecwd:
233 233 return forcecwd
234 234 return pycompat.getcwd()
235 235
236 236 def getcwd(self):
237 237 '''Return the path from which a canonical path is calculated.
238 238
239 239 This path should be used to resolve file patterns or to convert
240 240 canonical paths back to file paths for display. It shouldn't be
241 241 used to get real file paths. Use vfs functions instead.
242 242 '''
243 243 cwd = self._cwd
244 244 if cwd == self._root:
245 245 return ''
246 246 # self._root ends with a path separator if self._root is '/' or 'C:\'
247 247 rootsep = self._root
248 248 if not util.endswithsep(rootsep):
249 249 rootsep += pycompat.ossep
250 250 if cwd.startswith(rootsep):
251 251 return cwd[len(rootsep):]
252 252 else:
253 253 # we're outside the repo. return an absolute path.
254 254 return cwd
255 255
256 256 def pathto(self, f, cwd=None):
257 257 if cwd is None:
258 258 cwd = self.getcwd()
259 259 path = util.pathto(self._root, cwd, f)
260 260 if self._slash:
261 261 return util.pconvert(path)
262 262 return path
263 263
264 264 def __getitem__(self, key):
265 265 '''Return the current state of key (a filename) in the dirstate.
266 266
267 267 States are:
268 268 n normal
269 269 m needs merging
270 270 r marked for removal
271 271 a marked for addition
272 272 ? not tracked
273 273 '''
274 274 return self._map.get(key, ("?",))[0]
275 275
    def __contains__(self, key):
        # True when filename key is tracked in the dirstate
        return key in self._map
278 278
    def __iter__(self):
        # iterate tracked filenames in sorted order
        return iter(sorted(self._map))
281 281
    def items(self):
        """Iterate over (filename, dirstate entry) pairs."""
        return self._map.iteritems()

    # legacy alias, kept for callers using the Python 2 spelling
    iteritems = items
286 286
    def parents(self):
        # run each stored parent nodeid through the validator
        return [self._validate(p) for p in self._pl]
289 289
    def p1(self):
        # first parent, validated
        return self._validate(self._pl[0])
292 292
    def p2(self):
        # second parent, validated
        return self._validate(self._pl[1])
295 295
    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._branch)
298 298
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: every entry needing fixup ('m' or
            # otherparent 'n') lives in nonnormalset or otherparentset
            candidatefiles = self._map.nonnormalset.union(
                self._map.otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
339 339
    def setbranch(self, branch):
        """Persist the working directory branch name.

        Writes the 'branch' file atomically and refreshes its filecache
        entry so later reads see the new content, not stale stat data.
        """
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # abandon the atomictemp file so the old content stays intact
            f.discard()
            raise
355 355
356 356 def invalidate(self):
357 357 '''Causes the next access to reread the dirstate.
358 358
359 359 This is different from localrepo.invalidatedirstate() because it always
360 360 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
361 361 check whether the dirstate has changed before rereading it.'''
362 362
363 363 for a in ("_map", "_branch", "_ignore"):
364 364 if a in self.__dict__:
365 365 delattr(self, a)
366 366 self._lastnormaltime = 0
367 367 self._dirty = False
368 368 self._updatedfiles.clear()
369 369 self._parentwriters = 0
370 370 self._origpl = None
371 371
372 372 def copy(self, source, dest):
373 373 """Mark dest as a copy of source. Unmark dest if source is None."""
374 374 if source == dest:
375 375 return
376 376 self._dirty = True
377 377 if source is not None:
378 378 self._map.copymap[dest] = source
379 379 self._updatedfiles.add(source)
380 380 self._updatedfiles.add(dest)
381 381 elif self._map.copymap.pop(dest, None):
382 382 self._updatedfiles.add(dest)
383 383
    def copied(self, file):
        """Return the copy source of file, or None when not copied."""
        return self._map.copymap.get(file, None)
386 386
    def copies(self):
        """Return the full filename -> copy-source map."""
        return self._map.copymap
389 389
    def _droppath(self, f):
        """Update auxiliary structures when f stops being tracked."""
        # keep the directory structure in sync, but only when it has
        # actually been built ('dirs' present in __dict__) and f counted
        if self[f] not in "?r" and "dirs" in self._map.__dict__:
            self._map.dirs.delpath(f)

        # drop any cached case-folding entry for f
        if "filefoldmap" in self._map.__dict__:
            normed = util.normcase(f)
            if normed in self._map.filefoldmap:
                del self._map.filefoldmap[normed]

        self._updatedfiles.add(f)
400 400
    def _addpath(self, f, state, mode, size, mtime):
        """Record f in the dirstate with the given entry fields.

        Validates the filename when f becomes newly tracked, and keeps the
        dirs / nonnormalset / otherparentset structures consistent with
        the new entry.
        """
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            # a path cannot be both a file and a directory
            if f in self._map.dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._map.dirs:
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "dirs" in self._map.__dict__:
            self._map.dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        # invariant: any entry that is not clean-with-known-mtime must be
        # in nonnormalset so status() knows to re-examine it
        if state != 'n' or mtime == -1:
            self._map.nonnormalset.add(f)
        if size == -2:
            self._map.otherparentset.add(f)
424 424
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # fold size/mtime into the 31-bit range used by the storage format
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        # the entry is clean with a known mtime, so it no longer belongs
        # in nonnormalset
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
439 439
440 440 def normallookup(self, f):
441 441 '''Mark a file normal, but possibly dirty.'''
442 442 if self._pl[1] != nullid:
443 443 # if there is a merge going on and the file was either
444 444 # in state 'm' (-1) or coming from other parent (-2) before
445 445 # being removed, restore that state.
446 446 entry = self._map.get(f)
447 447 if entry is not None:
448 448 if entry[0] == 'r' and entry[2] in (-1, -2):
449 449 source = self._map.copymap.get(f)
450 450 if entry[2] == -1:
451 451 self.merge(f)
452 452 elif entry[2] == -2:
453 453 self.otherparent(f)
454 454 if source:
455 455 self.copy(source, f)
456 456 return
457 457 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
458 458 return
459 459 self._addpath(f, 'n', 0, -1, -1)
460 460 self._map.copymap.pop(f, None)
461 if f in self._map.nonnormalset:
462 self._map.nonnormalset.remove(f)
463 461
464 462 def otherparent(self, f):
465 463 '''Mark as coming from the other parent, always dirty.'''
466 464 if self._pl[1] == nullid:
467 465 raise error.Abort(_("setting %r to other parent "
468 466 "only allowed in merges") % f)
469 467 if f in self and self[f] == 'n':
470 468 # merge-like
471 469 self._addpath(f, 'm', 0, -2, -1)
472 470 else:
473 471 # add-like
474 472 self._addpath(f, 'n', 0, -2, -1)
475 473 self._map.copymap.pop(f, None)
476 474
    def add(self, f):
        '''Mark a file added.'''
        # mode/size/mtime are unknown for an added-but-uncommitted file
        self._addpath(f, 'a', 0, -1, -1)
        self._map.copymap.pop(f, None)
481 479
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state
                if entry[0] == 'm': # merge
                    size = -1
                elif entry[0] == 'n' and entry[2] == -2: # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._map.nonnormalset.add(f)
        if size == 0:
            # a plain removal cannot remain a copy target
            self._map.copymap.pop(f, None)
500 498
501 499 def merge(self, f):
502 500 '''Mark a file merged.'''
503 501 if self._pl[1] == nullid:
504 502 return self.normallookup(f)
505 503 return self.otherparent(f)
506 504
507 505 def drop(self, f):
508 506 '''Drop a file from the dirstate'''
509 507 if f in self._map:
510 508 self._dirty = True
511 509 self._droppath(f)
512 510 del self._map[f]
513 511 if f in self._map.nonnormalset:
514 512 self._map.nonnormalset.remove(f)
515 513 self._map.copymap.pop(f, None)
516 514
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Find the filesystem's spelling of path and cache it in storemap.

        normed is util.normcase(path). exists may be passed by the caller
        to skip the lexists() probe. Missing paths are returned with their
        original case when ignoremissing is true.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist
            storemap[normed] = folded

        return folded
542 540
543 541 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
544 542 normed = util.normcase(path)
545 543 folded = self._map.filefoldmap.get(normed, None)
546 544 if folded is None:
547 545 if isknown:
548 546 folded = path
549 547 else:
550 548 folded = self._discoverpath(path, normed, ignoremissing, exists,
551 549 self._map.filefoldmap)
552 550 return folded
553 551
554 552 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
555 553 normed = util.normcase(path)
556 554 folded = self._map.filefoldmap.get(normed, None)
557 555 if folded is None:
558 556 folded = self._map.dirfoldmap.get(normed, None)
559 557 if folded is None:
560 558 if isknown:
561 559 folded = path
562 560 else:
563 561 # store discovered result in dirfoldmap so that future
564 562 # normalizefile calls don't start matching directories
565 563 folded = self._discoverpath(path, normed, ignoremissing, exists,
566 564 self._map.dirfoldmap)
567 565 return folded
568 566
569 567 def normalize(self, path, isknown=False, ignoremissing=False):
570 568 '''
571 569 normalize the case of a pathname when on a casefolding filesystem
572 570
573 571 isknown specifies whether the filename came from walking the
574 572 disk, to avoid extra filesystem access.
575 573
576 574 If ignoremissing is True, missing path are returned
577 575 unchanged. Otherwise, we try harder to normalize possibly
578 576 existing path components.
579 577
580 578 The normalized case is determined based on the following precedence:
581 579
582 580 - version of name already stored in the dirstate
583 581 - version of name stored on disk
584 582 - version provided via command arguments
585 583 '''
586 584
587 585 if self._checkcase:
588 586 return self._normalize(path, isknown, ignoremissing)
589 587 return path
590 588
    def clear(self):
        """Forget all dirstate contents; marks the dirstate dirty."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
596 594
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to the single parent 'parent'.

        Files in changedfiles that are in allfiles are marked for lookup;
        the rest are dropped. With changedfiles=None, the whole dirstate
        is rebuilt from allfiles.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # clear() resets _lastnormaltime; preserve the old value since the
        # working copy files themselves are untouched here
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
615 613
    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        # delegated to the map; presumably captured when the dirstate file
        # is read — see dirstatemap for the exact definition
        return self._map.identity
623 621
    def write(self, tr):
        """Write the dirstate to disk, or schedule the write on tr.

        No-op when there are no pending changes. With a transaction, the
        actual write is delayed via a file generator and ambiguous
        timestamps are dropped up front.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f in self._updatedfiles:
                e = dmap.get(f)
                if e is not None and e[0] == 'n' and e[3] == now:
                    # mtime equals "now": cannot tell whether the file will
                    # change again in this timeslot, so force a lookup
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._map.nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
656 654
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # later registrations for the same category replace earlier ones
        self._plchangecallbacks[category] = callback
667 665
    def _writedirstate(self, st):
        """Serialize the dirstate to the open file object st."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
698 696
699 697 def _dirignore(self, f):
700 698 if f == '.':
701 699 return False
702 700 if self._ignore(f):
703 701 return True
704 702 for p in util.finddirs(f):
705 703 if self._ignore(p):
706 704 return True
707 705 return False
708 706
709 707 def _ignorefiles(self):
710 708 files = []
711 709 if os.path.exists(self._join('.hgignore')):
712 710 files.append(self._join('.hgignore'))
713 711 for name, path in self._ui.configitems("ui"):
714 712 if name == 'ignore' or name.startswith('ignore.'):
715 713 # we need to use os.path.join here rather than self._join
716 714 # because path is arbitrary and user-specified
717 715 files.append(os.path.join(self._rootdir, util.expandpath(path)))
718 716 return files
719 717
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) of the first pattern matching f.

        Walks the ignore files breadth-first, following 'subinclude'
        patterns, and returns (None, -1, "") when nothing matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    # queue the referenced file unless already processed
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")
739 737
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description for unsupported file types
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # hoist frequently used lookups into locals for the loops below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: both lists are
        # sorted, so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap._map)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the stat'ed results by their case-normalized spelling
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for colliding groups, keep only the on-disk spelling
            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._map.dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
869 867
870 868 def walk(self, match, subrepos, unknown, ignored, full=True):
871 869 '''
872 870 Walk recursively through the directory tree, finding all files
873 871 matched by match.
874 872
875 873 If full is False, maybe skip some known-clean files.
876 874
877 875 Return a dict mapping filename to stat-like object (either
878 876 mercurial.osutil.stat instance or return value of os.stat()).
879 877
880 878 '''
881 879 # full is a flag that extensions that hook into walk can use -- this
882 880 # implementation doesn't use it at all. This satisfies the contract
883 881 # because we only guarantee a "maybe".
884 882
885 883 if ignored:
886 884 ignore = util.never
887 885 dirignore = util.never
888 886 elif unknown:
889 887 ignore = self._ignore
890 888 dirignore = self._dirignore
891 889 else:
892 890 # if not unknown and not ignored, drop dir recursion and step 2
893 891 ignore = util.always
894 892 dirignore = util.always
895 893
896 894 matchfn = match.matchfn
897 895 matchalways = match.always()
898 896 matchtdir = match.traversedir
899 897 dmap = self._map
900 898 listdir = util.listdir
901 899 lstat = os.lstat
902 900 dirkind = stat.S_IFDIR
903 901 regkind = stat.S_IFREG
904 902 lnkkind = stat.S_IFLNK
905 903 join = self._join
906 904
907 905 exact = skipstep3 = False
908 906 if match.isexact(): # match.exact
909 907 exact = True
910 908 dirignore = util.always # skip step 2
911 909 elif match.prefix(): # match.match, no patterns
912 910 skipstep3 = True
913 911
914 912 if not exact and self._checkcase:
915 913 normalize = self._normalize
916 914 normalizefile = self._normalizefile
917 915 skipstep3 = False
918 916 else:
919 917 normalize = self._normalize
920 918 normalizefile = None
921 919
922 920 # step 1: find all explicit files
923 921 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
924 922
925 923 skipstep3 = skipstep3 and not (work or dirsnotfound)
926 924 work = [d for d in work if not dirignore(d[0])]
927 925
928 926 # step 2: visit subdirectories
929 927 def traverse(work, alreadynormed):
930 928 wadd = work.append
931 929 while work:
932 930 nd = work.pop()
933 931 if not match.visitdir(nd):
934 932 continue
935 933 skip = None
936 934 if nd == '.':
937 935 nd = ''
938 936 else:
939 937 skip = '.hg'
940 938 try:
941 939 entries = listdir(join(nd), stat=True, skip=skip)
942 940 except OSError as inst:
943 941 if inst.errno in (errno.EACCES, errno.ENOENT):
944 942 match.bad(self.pathto(nd),
945 943 encoding.strtolocal(inst.strerror))
946 944 continue
947 945 raise
948 946 for f, kind, st in entries:
949 947 if normalizefile:
950 948 # even though f might be a directory, we're only
951 949 # interested in comparing it to files currently in the
952 950 # dmap -- therefore normalizefile is enough
953 951 nf = normalizefile(nd and (nd + "/" + f) or f, True,
954 952 True)
955 953 else:
956 954 nf = nd and (nd + "/" + f) or f
957 955 if nf not in results:
958 956 if kind == dirkind:
959 957 if not ignore(nf):
960 958 if matchtdir:
961 959 matchtdir(nf)
962 960 wadd(nf)
963 961 if nf in dmap and (matchalways or matchfn(nf)):
964 962 results[nf] = None
965 963 elif kind == regkind or kind == lnkkind:
966 964 if nf in dmap:
967 965 if matchalways or matchfn(nf):
968 966 results[nf] = st
969 967 elif ((matchalways or matchfn(nf))
970 968 and not ignore(nf)):
971 969 # unknown file -- normalize if necessary
972 970 if not alreadynormed:
973 971 nf = normalize(nf, False, True)
974 972 results[nf] = st
975 973 elif nf in dmap and (matchalways or matchfn(nf)):
976 974 results[nf] = None
977 975
978 976 for nd, d in work:
979 977 # alreadynormed means that processwork doesn't have to do any
980 978 # expensive directory normalization
981 979 alreadynormed = not normalize or nd == d
982 980 traverse([d], alreadynormed)
983 981
984 982 for s in subrepos:
985 983 del results[s]
986 984 del results['.hg']
987 985
988 986 # step 3: visit remaining files from dmap
989 987 if not skipstep3 and not exact:
990 988 # If a dmap file is not in results yet, it was either
991 989 # a) not matching matchfn b) ignored, c) missing, or d) under a
992 990 # symlink directory.
993 991 if not results and matchalways:
994 992 visit = [f for f in dmap]
995 993 else:
996 994 visit = [f for f in dmap if f not in results and matchfn(f)]
997 995 visit.sort()
998 996
999 997 if unknown:
1000 998 # unknown == True means we walked all dirs under the roots
1001 999 # that wasn't ignored, and everything that matched was stat'ed
1002 1000 # and is already in results.
1003 1001 # The rest must thus be ignored or under a symlink.
1004 1002 audit_path = pathutil.pathauditor(self._root, cached=True)
1005 1003
1006 1004 for nf in iter(visit):
1007 1005 # If a stat for the same file was already added with a
1008 1006 # different case, don't add one for this, since that would
1009 1007 # make it appear as if the file exists under both names
1010 1008 # on disk.
1011 1009 if (normalizefile and
1012 1010 normalizefile(nf, True, True) in results):
1013 1011 results[nf] = None
1014 1012 # Report ignored items in the dmap as long as they are not
1015 1013 # under a symlink directory.
1016 1014 elif audit_path.check(nf):
1017 1015 try:
1018 1016 results[nf] = lstat(join(nf))
1019 1017 # file was just ignored, no links, and exists
1020 1018 except OSError:
1021 1019 # file doesn't exist
1022 1020 results[nf] = None
1023 1021 else:
1024 1022 # It's either missing or under a symlink directory
1025 1023 # which we in this case report as missing
1026 1024 results[nf] = None
1027 1025 else:
1028 1026 # We may not have walked the full directory tree above,
1029 1027 # so stat and check everything we missed.
1030 1028 iv = iter(visit)
1031 1029 for st in util.statfiles([join(i) for i in visit]):
1032 1030 results[next(iv)] = st
1033 1031 return results
1034 1032
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        # Keep the caller-supplied listing flags; the same local names are
        # reused below for the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()
        # Bind frequently-called methods to locals: this loop runs once per
        # file in the working directory and attribute lookups add up.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if not dcontains(fn):
                # File on disk but not in the dirstate: either ignored or
                # unknown, depending on the ignore rules and listing flags.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]    # one of 'n' (normal), 'm' (merged), 'a' (added),
                            # 'r' (removed)
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                # Tracked (normal/merged/added) but no stat result: the file
                # is gone from disk.
                dadd(fn)
            elif state == 'n':
                # size -2 flags an entry from the other merge parent; a file
                # in copymap must be reported modified so the copy is noticed.
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    # Same size but different mtime: content may have changed;
                    # caller must read the file to decide ("unsure").
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1129 1127
1130 1128 def matches(self, match):
1131 1129 '''
1132 1130 return files in the dirstate (in whatever state) filtered by match
1133 1131 '''
1134 1132 dmap = self._map
1135 1133 if match.always():
1136 1134 return dmap.keys()
1137 1135 files = match.files()
1138 1136 if match.isexact():
1139 1137 # fast path -- filter the other way around, since typically files is
1140 1138 # much smaller than dmap
1141 1139 return [f for f in files if f in dmap]
1142 1140 if match.prefix() and all(fn in dmap for fn in files):
1143 1141 # fast path -- all the values are known to be files, so just return
1144 1142 # that
1145 1143 return list(files)
1146 1144 return [f for f in dmap if match(f)]
1147 1145
1148 1146 def _actualfilename(self, tr):
1149 1147 if tr:
1150 1148 return self._pendingfilename
1151 1149 else:
1152 1150 return self._filename
1153 1151
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # remove any stale backup first so the copy below can create a
        # fresh (hard)link instead of clobbering an existing file
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1183 1181
1184 1182 def restorebackup(self, tr, backupname):
1185 1183 '''Restore dirstate by backup file'''
1186 1184 # this "invalidate()" prevents "wlock.release()" from writing
1187 1185 # changes of dirstate out after restoring from backup file
1188 1186 self.invalidate()
1189 1187 filename = self._actualfilename(tr)
1190 1188 o = self._opener
1191 1189 if util.samefile(o.join(backupname), o.join(filename)):
1192 1190 o.unlink(backupname)
1193 1191 else:
1194 1192 o.rename(backupname, filename, checkambig=True)
1195 1193
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # 'tr' is accepted for interface symmetry with savebackup() and
        # restorebackup(); it is not consulted here.
        self._opener.unlink(backupname)
1199 1197
class dirstatemap(object):
    """Map of filename -> dirstate entry, backing a dirstate instance.

    Entries are dirstatetuple-like 4-tuples (state, mode, size, mtime)
    where state is one of 'n' (normal), 'm' (merged), 'a' (added) or
    'r' (removed).  The map is lazily parsed from the on-disk 'dirstate'
    file on first access, together with the copy map and the working
    directory parents.
    """

    def __init__(self, ui, opener, root):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = 'dirstate'

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        # Assign the real dict *before* read(): read() fills self._map in
        # place (and may replace it with a presized dict).
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        # Assign first, then force self._map: parsing the dirstate file
        # populates both the entry map and the copy map.
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        # Reset to an empty dirstate with null parents and drop every
        # derived cache so it gets recomputed on next access.
        self._map.clear()
        self.copymap.clear()
        self.setparents(nullid, nullid)
        util.clearcachedproperty(self, "dirs")
        util.clearcachedproperty(self, "filefoldmap")
        util.clearcachedproperty(self, "dirfoldmap")
        util.clearcachedproperty(self, "nonnormalset")
        util.clearcachedproperty(self, "otherparentset")

    def iteritems(self):
        return self._map.iteritems()

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __setitem__(self, key, value):
        self._map[key] = value

    def __getitem__(self, key):
        return self._map[key]

    def __delitem__(self, key):
        del self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            # Fast path: implemented in the C parsers module when available.
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            # Pure-Python fallback: nonnormal entries are those not in
            # state 'n' or with an unset (-1) mtime; "other parent"
            # entries are normal entries with the magic size -2.
            nonnorm = set()
            otherparent = set()
            for fname, e in self._map.iteritems():
                if e[0] != 'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == 'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # C implementation available
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @propertycache
    def dirs(self):
        """Returns a set-like object containing all the directories in the
        current dirstate.
        """
        return util.dirs(self._map, 'r')

    def _opendirstatefile(self):
        # Returns the pending dirstate file when a transaction provides one;
        # refuse to mix pending and non-pending reads in one instance.
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp

    def parents(self):
        """Return the working directory parents, reading only the first
        40 bytes (two nodeids) of the dirstate file if not cached yet."""
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(40)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = ''

            l = len(st)
            if l == 40:
                self._parents = st[:20], st[20:40]
            elif l == 0:
                self._parents = [nullid, nullid]
            else:
                # Anything else means a truncated/corrupt dirstate header.
                raise error.Abort(_('working directory state appears '
                                    'damaged!'))

        return self._parents

    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        """Parse the on-disk dirstate into self._map and self.copymap."""
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename))

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            #
            # Use floor division: plain '/' would produce a float under
            # Python 3, which dict_new_presized cannot accept.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.__setitem__ = self._map.__setitem__
        self.__delitem__ = self._map.__delitem__
        self.get = self._map.get

    def write(self, st, now):
        """Serialize the dirstate to file-like 'st' using 'now' as the
        timestamp cutoff, then refresh the nonnormal/otherparent caches."""
        st.write(parsers.pack_dirstate(self._map, self.copymap,
                                       self.parents(), now))
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        # Computing one set yields both; cache the sibling as a side effect.
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        # Computing one set yields both; cache the sibling as a side effect.
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    @propertycache
    def identity(self):
        # read() records the file identity (for ambiguity detection) as a
        # side effect of loading the map.
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        # Case-normalized directory name -> on-disk directory name.
        f = {}
        normcase = util.normcase
        for name in self.dirs:
            f[normcase(name)] = name
        return f
General Comments 0
You need to be logged in to leave comments. Login now