dirstate: implement __len__ on dirstatemap (issue5695)...
Simon Whitaker
r34409:7d2f71b7 default
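
The diff below adds a __len__ method to the dirstatemap class, delegating to its backing dict so that len() works directly on a dirstatemap instance (issue5695). As a minimal, hypothetical sketch of the delegation pattern being added (the wrapper class and sample entry are illustrative, not Mercurial's actual code):

    # Illustrative wrapper forwarding __len__ to a backing dict, mirroring
    # what the changeset below does for dirstatemap.
    class mapwrapper(object):
        def __init__(self):
            self._map = {}          # backing dict: filename -> entry tuple

        def __len__(self):
            return len(self._map)   # len(wrapper) reports the entry count

        def __iter__(self):
            return iter(self._map)

    w = mapwrapper()
    w._map['a.txt'] = ('n', 0, -1, -1)
    assert len(w) == 1              # works only because __len__ is defined
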
@@ -1,1398 +1,1401 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is a
71 71 # UNC path pointing to a root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._lastnormaltime = 0
75 75 self._ui = ui
76 76 self._filecache = {}
77 77 self._parentwriters = 0
78 78 self._filename = 'dirstate'
79 79 self._pendingfilename = '%s.pending' % self._filename
80 80 self._plchangecallbacks = {}
81 81 self._origpl = None
82 82 self._updatedfiles = set()
83 83
84 84 @contextlib.contextmanager
85 85 def parentchange(self):
86 86 '''Context manager for handling dirstate parents.
87 87
88 88 If an exception occurs in the scope of the context manager,
89 89 the incoherent dirstate won't be written when wlock is
90 90 released.
91 91 '''
92 92 self._parentwriters += 1
93 93 yield
94 94 # Typically we want the "undo" step of a context manager in a
95 95 # finally block so it happens even when an exception
96 96 # occurs. In this case, however, we only want to decrement
97 97 # parentwriters if the code in the with statement exits
98 98 # normally, so we don't have a try/finally here on purpose.
99 99 self._parentwriters -= 1
100 100
101 101 def beginparentchange(self):
102 102 '''Marks the beginning of a set of changes that involve changing
103 103 the dirstate parents. If there is an exception during this time,
104 104 the dirstate will not be written when the wlock is released. This
105 105 prevents writing an incoherent dirstate where the parent doesn't
106 106 match the contents.
107 107 '''
108 108 self._ui.deprecwarn('beginparentchange is obsoleted by the '
109 109 'parentchange context manager.', '4.3')
110 110 self._parentwriters += 1
111 111
112 112 def endparentchange(self):
113 113 '''Marks the end of a set of changes that involve changing the
114 114 dirstate parents. Once all parent changes have been marked done,
115 115 the wlock will be free to write the dirstate on release.
116 116 '''
117 117 self._ui.deprecwarn('endparentchange is obsoleted by the '
118 118 'parentchange context manager.', '4.3')
119 119 if self._parentwriters > 0:
120 120 self._parentwriters -= 1
121 121
122 122 def pendingparentchange(self):
123 123 '''Returns true if the dirstate is in the middle of a set of changes
124 124 that modify the dirstate parent.
125 125 '''
126 126 return self._parentwriters > 0
127 127
128 128 @propertycache
129 129 def _map(self):
130 130 '''Return the dirstate contents as a map from filename to
131 131 (state, mode, size, time).'''
132 132 self._read()
133 133 return self._map
134 134
135 135 @propertycache
136 136 def _identity(self):
137 137 self._read()
138 138 return self._identity
139 139
140 140 @propertycache
141 141 def _nonnormalset(self):
142 142 nonnorm, otherparents = self._map.nonnormalentries()
143 143 self._otherparentset = otherparents
144 144 return nonnorm
145 145
146 146 @propertycache
147 147 def _otherparentset(self):
148 148 nonnorm, otherparents = self._map.nonnormalentries()
149 149 self._nonnormalset = nonnorm
150 150 return otherparents
151 151
152 152 @propertycache
153 153 def _filefoldmap(self):
154 154 return self._map.filefoldmap()
155 155
156 156 @propertycache
157 157 def _dirfoldmap(self):
158 158 f = {}
159 159 normcase = util.normcase
160 160 for name in self._dirs:
161 161 f[normcase(name)] = name
162 162 return f
163 163
164 164 @property
165 165 def _sparsematcher(self):
166 166 """The matcher for the sparse checkout.
167 167
168 168 The working directory may not include every file from a manifest. The
169 169 matcher obtained by this property will match a path if it is to be
170 170 included in the working directory.
171 171 """
172 172 # TODO there is potential to cache this property. For now, the matcher
173 173 # is resolved on every access. (But the called function does use a
174 174 # cache to keep the lookup fast.)
175 175 return self._sparsematchfn()
176 176
177 177 @repocache('branch')
178 178 def _branch(self):
179 179 try:
180 180 return self._opener.read("branch").strip() or "default"
181 181 except IOError as inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184 return "default"
185 185
186 186 @property
187 187 def _pl(self):
188 188 return self._map.parents()
189 189
190 190 @propertycache
191 191 def _dirs(self):
192 192 return self._map.dirs()
193 193
194 194 def dirs(self):
195 195 return self._dirs
196 196
197 197 @rootcache('.hgignore')
198 198 def _ignore(self):
199 199 files = self._ignorefiles()
200 200 if not files:
201 201 return matchmod.never(self._root, '')
202 202
203 203 pats = ['include:%s' % f for f in files]
204 204 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
205 205
206 206 @propertycache
207 207 def _slash(self):
208 208 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
209 209
210 210 @propertycache
211 211 def _checklink(self):
212 212 return util.checklink(self._root)
213 213
214 214 @propertycache
215 215 def _checkexec(self):
216 216 return util.checkexec(self._root)
217 217
218 218 @propertycache
219 219 def _checkcase(self):
220 220 return not util.fscasesensitive(self._join('.hg'))
221 221
222 222 def _join(self, f):
223 223 # much faster than os.path.join()
224 224 # it's safe because f is always a relative path
225 225 return self._rootdir + f
226 226
227 227 def flagfunc(self, buildfallback):
228 228 if self._checklink and self._checkexec:
229 229 def f(x):
230 230 try:
231 231 st = os.lstat(self._join(x))
232 232 if util.statislink(st):
233 233 return 'l'
234 234 if util.statisexec(st):
235 235 return 'x'
236 236 except OSError:
237 237 pass
238 238 return ''
239 239 return f
240 240
241 241 fallback = buildfallback()
242 242 if self._checklink:
243 243 def f(x):
244 244 if os.path.islink(self._join(x)):
245 245 return 'l'
246 246 if 'x' in fallback(x):
247 247 return 'x'
248 248 return ''
249 249 return f
250 250 if self._checkexec:
251 251 def f(x):
252 252 if 'l' in fallback(x):
253 253 return 'l'
254 254 if util.isexec(self._join(x)):
255 255 return 'x'
256 256 return ''
257 257 return f
258 258 else:
259 259 return fallback
260 260
261 261 @propertycache
262 262 def _cwd(self):
263 263 # internal config: ui.forcecwd
264 264 forcecwd = self._ui.config('ui', 'forcecwd')
265 265 if forcecwd:
266 266 return forcecwd
267 267 return pycompat.getcwd()
268 268
269 269 def getcwd(self):
270 270 '''Return the path from which a canonical path is calculated.
271 271
272 272 This path should be used to resolve file patterns or to convert
273 273 canonical paths back to file paths for display. It shouldn't be
274 274 used to get real file paths. Use vfs functions instead.
275 275 '''
276 276 cwd = self._cwd
277 277 if cwd == self._root:
278 278 return ''
279 279 # self._root ends with a path separator if self._root is '/' or 'C:\'
280 280 rootsep = self._root
281 281 if not util.endswithsep(rootsep):
282 282 rootsep += pycompat.ossep
283 283 if cwd.startswith(rootsep):
284 284 return cwd[len(rootsep):]
285 285 else:
286 286 # we're outside the repo. return an absolute path.
287 287 return cwd
288 288
289 289 def pathto(self, f, cwd=None):
290 290 if cwd is None:
291 291 cwd = self.getcwd()
292 292 path = util.pathto(self._root, cwd, f)
293 293 if self._slash:
294 294 return util.pconvert(path)
295 295 return path
296 296
297 297 def __getitem__(self, key):
298 298 '''Return the current state of key (a filename) in the dirstate.
299 299
300 300 States are:
301 301 n normal
302 302 m needs merging
303 303 r marked for removal
304 304 a marked for addition
305 305 ? not tracked
306 306 '''
307 307 return self._map.get(key, ("?",))[0]
308 308
309 309 def __contains__(self, key):
310 310 return key in self._map
311 311
312 312 def __iter__(self):
313 313 return iter(sorted(self._map))
314 314
315 315 def items(self):
316 316 return self._map.iteritems()
317 317
318 318 iteritems = items
319 319
320 320 def parents(self):
321 321 return [self._validate(p) for p in self._pl]
322 322
323 323 def p1(self):
324 324 return self._validate(self._pl[0])
325 325
326 326 def p2(self):
327 327 return self._validate(self._pl[1])
328 328
329 329 def branch(self):
330 330 return encoding.tolocal(self._branch)
331 331
332 332 def setparents(self, p1, p2=nullid):
333 333 """Set dirstate parents to p1 and p2.
334 334
335 335 When moving from two parents to one, 'm' merged entries are
336 336 adjusted to normal and previous copy records are discarded and
337 337 returned by the call.
338 338
339 339 See localrepo.setparents()
340 340 """
341 341 if self._parentwriters == 0:
342 342 raise ValueError("cannot set dirstate parent without "
343 343 "calling dirstate.beginparentchange")
344 344
345 345 self._dirty = True
346 346 oldp2 = self._pl[1]
347 347 if self._origpl is None:
348 348 self._origpl = self._pl
349 349 self._map.setparents(p1, p2)
350 350 copies = {}
351 351 if oldp2 != nullid and p2 == nullid:
352 352 candidatefiles = self._nonnormalset.union(self._otherparentset)
353 353 for f in candidatefiles:
354 354 s = self._map.get(f)
355 355 if s is None:
356 356 continue
357 357
358 358 # Discard 'm' markers when moving away from a merge state
359 359 if s[0] == 'm':
360 360 source = self._map.copymap.get(f)
361 361 if source:
362 362 copies[f] = source
363 363 self.normallookup(f)
364 364 # Also fix up otherparent markers
365 365 elif s[0] == 'n' and s[2] == -2:
366 366 source = self._map.copymap.get(f)
367 367 if source:
368 368 copies[f] = source
369 369 self.add(f)
370 370 return copies
371 371
372 372 def setbranch(self, branch):
373 373 self._branch = encoding.fromlocal(branch)
374 374 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
375 375 try:
376 376 f.write(self._branch + '\n')
377 377 f.close()
378 378
379 379 # make sure filecache has the correct stat info for _branch after
380 380 # replacing the underlying file
381 381 ce = self._filecache['_branch']
382 382 if ce:
383 383 ce.refresh()
384 384 except: # re-raises
385 385 f.discard()
386 386 raise
387 387
388 388 def _read(self):
389 389 self._map = dirstatemap(self._ui, self._opener, self._root)
390 390
391 391 # ignore HG_PENDING because identity is used only for writing
392 392 self._identity = util.filestat.frompath(
393 393 self._opener.join(self._filename))
394 394 try:
395 395 fp = self._map._opendirstatefile()
396 396 try:
397 397 st = fp.read()
398 398 finally:
399 399 fp.close()
400 400 except IOError as err:
401 401 if err.errno != errno.ENOENT:
402 402 raise
403 403 return
404 404 if not st:
405 405 return
406 406
407 407 if util.safehasattr(parsers, 'dict_new_presized'):
408 408 # Make an estimate of the number of files in the dirstate based on
409 409 # its size. From a linear regression on a set of real-world repos,
410 410 # all over 10,000 files, the size of a dirstate entry is 85
411 411 # bytes. The cost of resizing is significantly higher than the cost
412 412 # of filling in a larger presized dict, so subtract 20% from the
413 413 # size.
414 414 #
415 415 # This heuristic is imperfect in many ways, so in a future dirstate
416 416 # format update it makes sense to just record the number of entries
417 417 # on write.
418 418 self._map._map = parsers.dict_new_presized(len(st) / 71)
419 419
420 420 # Python's garbage collector triggers a GC each time a certain number
421 421 # of container objects (the number being defined by
422 422 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
423 423 # for each file in the dirstate. The C version then immediately marks
424 424 # them as not to be tracked by the collector. However, this has no
425 425 # effect on when GCs are triggered, only on what objects the GC looks
426 426 # into. This means that O(number of files) GCs are unavoidable.
427 427 # Depending on when in the process's lifetime the dirstate is parsed,
428 428 # this can get very expensive. As a workaround, disable GC while
429 429 # parsing the dirstate.
430 430 #
431 431 # (we cannot decorate the function directly since it is in a C module)
432 432 parse_dirstate = util.nogc(parsers.parse_dirstate)
433 433 p = parse_dirstate(self._map._map, self._map.copymap, st)
434 434 if not self._map._dirtyparents:
435 435 self._map.setparents(*p)
436 436
437 437 def invalidate(self):
438 438 '''Causes the next access to reread the dirstate.
439 439
440 440 This is different from localrepo.invalidatedirstate() because it always
441 441 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
442 442 check whether the dirstate has changed before rereading it.'''
443 443
444 444 for a in ("_map", "_identity",
445 445 "_filefoldmap", "_dirfoldmap", "_branch",
446 446 "_dirs", "_ignore", "_nonnormalset",
447 447 "_otherparentset"):
448 448 if a in self.__dict__:
449 449 delattr(self, a)
450 450 self._lastnormaltime = 0
451 451 self._dirty = False
452 452 self._updatedfiles.clear()
453 453 self._parentwriters = 0
454 454 self._origpl = None
455 455
456 456 def copy(self, source, dest):
457 457 """Mark dest as a copy of source. Unmark dest if source is None."""
458 458 if source == dest:
459 459 return
460 460 self._dirty = True
461 461 if source is not None:
462 462 self._map.copymap[dest] = source
463 463 self._updatedfiles.add(source)
464 464 self._updatedfiles.add(dest)
465 465 elif self._map.copymap.pop(dest, None):
466 466 self._updatedfiles.add(dest)
467 467
468 468 def copied(self, file):
469 469 return self._map.copymap.get(file, None)
470 470
471 471 def copies(self):
472 472 return self._map.copymap
473 473
474 474 def _droppath(self, f):
475 475 if self[f] not in "?r" and "_dirs" in self.__dict__:
476 476 self._dirs.delpath(f)
477 477
478 478 if "_filefoldmap" in self.__dict__:
479 479 normed = util.normcase(f)
480 480 if normed in self._filefoldmap:
481 481 del self._filefoldmap[normed]
482 482
483 483 self._updatedfiles.add(f)
484 484
485 485 def _addpath(self, f, state, mode, size, mtime):
486 486 oldstate = self[f]
487 487 if state == 'a' or oldstate == 'r':
488 488 scmutil.checkfilename(f)
489 489 if f in self._dirs:
490 490 raise error.Abort(_('directory %r already in dirstate') % f)
491 491 # shadows
492 492 for d in util.finddirs(f):
493 493 if d in self._dirs:
494 494 break
495 495 entry = self._map.get(d)
496 496 if entry is not None and entry[0] != 'r':
497 497 raise error.Abort(
498 498 _('file %r in dirstate clashes with %r') % (d, f))
499 499 if oldstate in "?r" and "_dirs" in self.__dict__:
500 500 self._dirs.addpath(f)
501 501 self._dirty = True
502 502 self._updatedfiles.add(f)
503 503 self._map[f] = dirstatetuple(state, mode, size, mtime)
504 504 if state != 'n' or mtime == -1:
505 505 self._nonnormalset.add(f)
506 506 if size == -2:
507 507 self._otherparentset.add(f)
508 508
509 509 def normal(self, f):
510 510 '''Mark a file normal and clean.'''
511 511 s = os.lstat(self._join(f))
512 512 mtime = s.st_mtime
513 513 self._addpath(f, 'n', s.st_mode,
514 514 s.st_size & _rangemask, mtime & _rangemask)
515 515 self._map.copymap.pop(f, None)
516 516 if f in self._nonnormalset:
517 517 self._nonnormalset.remove(f)
518 518 if mtime > self._lastnormaltime:
519 519 # Remember the most recent modification timeslot for status(),
520 520 # to make sure we won't miss future size-preserving file content
521 521 # modifications that happen within the same timeslot.
522 522 self._lastnormaltime = mtime
523 523
524 524 def normallookup(self, f):
525 525 '''Mark a file normal, but possibly dirty.'''
526 526 if self._pl[1] != nullid:
527 527 # if there is a merge going on and the file was either
528 528 # in state 'm' (-1) or coming from other parent (-2) before
529 529 # being removed, restore that state.
530 530 entry = self._map.get(f)
531 531 if entry is not None:
532 532 if entry[0] == 'r' and entry[2] in (-1, -2):
533 533 source = self._map.copymap.get(f)
534 534 if entry[2] == -1:
535 535 self.merge(f)
536 536 elif entry[2] == -2:
537 537 self.otherparent(f)
538 538 if source:
539 539 self.copy(source, f)
540 540 return
541 541 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
542 542 return
543 543 self._addpath(f, 'n', 0, -1, -1)
544 544 self._map.copymap.pop(f, None)
545 545 if f in self._nonnormalset:
546 546 self._nonnormalset.remove(f)
547 547
548 548 def otherparent(self, f):
549 549 '''Mark as coming from the other parent, always dirty.'''
550 550 if self._pl[1] == nullid:
551 551 raise error.Abort(_("setting %r to other parent "
552 552 "only allowed in merges") % f)
553 553 if f in self and self[f] == 'n':
554 554 # merge-like
555 555 self._addpath(f, 'm', 0, -2, -1)
556 556 else:
557 557 # add-like
558 558 self._addpath(f, 'n', 0, -2, -1)
559 559 self._map.copymap.pop(f, None)
560 560
561 561 def add(self, f):
562 562 '''Mark a file added.'''
563 563 self._addpath(f, 'a', 0, -1, -1)
564 564 self._map.copymap.pop(f, None)
565 565
566 566 def remove(self, f):
567 567 '''Mark a file removed.'''
568 568 self._dirty = True
569 569 self._droppath(f)
570 570 size = 0
571 571 if self._pl[1] != nullid:
572 572 entry = self._map.get(f)
573 573 if entry is not None:
574 574 # backup the previous state
575 575 if entry[0] == 'm': # merge
576 576 size = -1
577 577 elif entry[0] == 'n' and entry[2] == -2: # other parent
578 578 size = -2
579 579 self._otherparentset.add(f)
580 580 self._map[f] = dirstatetuple('r', 0, size, 0)
581 581 self._nonnormalset.add(f)
582 582 if size == 0:
583 583 self._map.copymap.pop(f, None)
584 584
585 585 def merge(self, f):
586 586 '''Mark a file merged.'''
587 587 if self._pl[1] == nullid:
588 588 return self.normallookup(f)
589 589 return self.otherparent(f)
590 590
591 591 def drop(self, f):
592 592 '''Drop a file from the dirstate'''
593 593 if f in self._map:
594 594 self._dirty = True
595 595 self._droppath(f)
596 596 del self._map[f]
597 597 if f in self._nonnormalset:
598 598 self._nonnormalset.remove(f)
599 599 self._map.copymap.pop(f, None)
600 600
601 601 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
602 602 if exists is None:
603 603 exists = os.path.lexists(os.path.join(self._root, path))
604 604 if not exists:
605 605 # Maybe a path component exists
606 606 if not ignoremissing and '/' in path:
607 607 d, f = path.rsplit('/', 1)
608 608 d = self._normalize(d, False, ignoremissing, None)
609 609 folded = d + "/" + f
610 610 else:
611 611 # No path components, preserve original case
612 612 folded = path
613 613 else:
614 614 # recursively normalize leading directory components
615 615 # against dirstate
616 616 if '/' in normed:
617 617 d, f = normed.rsplit('/', 1)
618 618 d = self._normalize(d, False, ignoremissing, True)
619 619 r = self._root + "/" + d
620 620 folded = d + "/" + util.fspath(f, r)
621 621 else:
622 622 folded = util.fspath(normed, self._root)
623 623 storemap[normed] = folded
624 624
625 625 return folded
626 626
627 627 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
628 628 normed = util.normcase(path)
629 629 folded = self._filefoldmap.get(normed, None)
630 630 if folded is None:
631 631 if isknown:
632 632 folded = path
633 633 else:
634 634 folded = self._discoverpath(path, normed, ignoremissing, exists,
635 635 self._filefoldmap)
636 636 return folded
637 637
638 638 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
639 639 normed = util.normcase(path)
640 640 folded = self._filefoldmap.get(normed, None)
641 641 if folded is None:
642 642 folded = self._dirfoldmap.get(normed, None)
643 643 if folded is None:
644 644 if isknown:
645 645 folded = path
646 646 else:
647 647 # store discovered result in dirfoldmap so that future
648 648 # normalizefile calls don't start matching directories
649 649 folded = self._discoverpath(path, normed, ignoremissing, exists,
650 650 self._dirfoldmap)
651 651 return folded
652 652
653 653 def normalize(self, path, isknown=False, ignoremissing=False):
654 654 '''
655 655 normalize the case of a pathname when on a casefolding filesystem
656 656
657 657 isknown specifies whether the filename came from walking the
658 658 disk, to avoid extra filesystem access.
659 659
660 660 If ignoremissing is True, missing paths are returned
661 661 unchanged. Otherwise, we try harder to normalize possibly
662 662 existing path components.
663 663
664 664 The normalized case is determined based on the following precedence:
665 665
666 666 - version of name already stored in the dirstate
667 667 - version of name stored on disk
668 668 - version provided via command arguments
669 669 '''
670 670
671 671 if self._checkcase:
672 672 return self._normalize(path, isknown, ignoremissing)
673 673 return path
674 674
675 675 def clear(self):
676 676 self._map = dirstatemap(self._ui, self._opener, self._root)
677 677 self._nonnormalset = set()
678 678 self._otherparentset = set()
679 679 if "_dirs" in self.__dict__:
680 680 delattr(self, "_dirs")
681 681 self._map.setparents(nullid, nullid)
682 682 self._lastnormaltime = 0
683 683 self._updatedfiles.clear()
684 684 self._dirty = True
685 685
686 686 def rebuild(self, parent, allfiles, changedfiles=None):
687 687 if changedfiles is None:
688 688 # Rebuild entire dirstate
689 689 changedfiles = allfiles
690 690 lastnormaltime = self._lastnormaltime
691 691 self.clear()
692 692 self._lastnormaltime = lastnormaltime
693 693
694 694 if self._origpl is None:
695 695 self._origpl = self._pl
696 696 self._map.setparents(parent, nullid)
697 697 for f in changedfiles:
698 698 if f in allfiles:
699 699 self.normallookup(f)
700 700 else:
701 701 self.drop(f)
702 702
703 703 self._dirty = True
704 704
705 705 def identity(self):
706 706 '''Return the identity of the dirstate itself to detect changes in storage
707 707
708 708 If the identity of the previous dirstate is equal to this one, writing
709 709 out changes based on the former dirstate can preserve consistency.
710 710 '''
711 711 return self._identity
712 712
713 713 def write(self, tr):
714 714 if not self._dirty:
715 715 return
716 716
717 717 filename = self._filename
718 718 if tr:
719 719 # 'dirstate.write()' is not only for writing in-memory
720 720 # changes out, but also for dropping ambiguous timestamps.
721 721 # delayed writing would re-raise the "ambiguous timestamp issue".
722 722 # See also the wiki page below for detail:
723 723 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
724 724
725 725 # emulate dropping timestamp in 'parsers.pack_dirstate'
726 726 now = _getfsnow(self._opener)
727 727 dmap = self._map
728 728 for f in self._updatedfiles:
729 729 e = dmap.get(f)
730 730 if e is not None and e[0] == 'n' and e[3] == now:
731 731 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
732 732 self._nonnormalset.add(f)
733 733
734 734 # emulate that all 'dirstate.normal' results are written out
735 735 self._lastnormaltime = 0
736 736 self._updatedfiles.clear()
737 737
738 738 # delay writing in-memory changes out
739 739 tr.addfilegenerator('dirstate', (self._filename,),
740 740 self._writedirstate, location='plain')
741 741 return
742 742
743 743 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
744 744 self._writedirstate(st)
745 745
746 746 def addparentchangecallback(self, category, callback):
747 747 """add a callback to be called when the wd parents are changed
748 748
749 749 Callback will be called with the following arguments:
750 750 dirstate, (oldp1, oldp2), (newp1, newp2)
751 751
752 752 Category is a unique identifier to allow overwriting an old callback
753 753 with a newer callback.
754 754 """
755 755 self._plchangecallbacks[category] = callback
756 756
757 757 def _writedirstate(self, st):
758 758 # notify callbacks about parents change
759 759 if self._origpl is not None and self._origpl != self._pl:
760 760 for c, callback in sorted(self._plchangecallbacks.iteritems()):
761 761 callback(self, self._origpl, self._pl)
762 762 self._origpl = None
763 763 # use the modification time of the newly created temporary file as the
764 764 # filesystem's notion of 'now'
765 765 now = util.fstat(st).st_mtime & _rangemask
766 766
767 767 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
768 768 # the timestamp of each entry in the dirstate, because of 'now > mtime'
769 769 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
770 770 if delaywrite > 0:
771 771 # do we have any files to delay for?
772 772 for f, e in self._map.iteritems():
773 773 if e[0] == 'n' and e[3] == now:
774 774 import time # to avoid useless import
775 775 # rather than sleep n seconds, sleep until the next
776 776 # multiple of n seconds
777 777 clock = time.time()
778 778 start = int(clock) - (int(clock) % delaywrite)
779 779 end = start + delaywrite
780 780 time.sleep(end - clock)
781 781 now = end # trust our estimate that the end is near now
782 782 break
783 783
784 784 st.write(parsers.pack_dirstate(self._map._map, self._map.copymap,
785 785 self._pl, now))
786 786 self._nonnormalset, self._otherparentset = self._map.nonnormalentries()
787 787 st.close()
788 788 self._lastnormaltime = 0
789 789 self._dirty = self._map._dirtyparents = False
790 790
791 791 def _dirignore(self, f):
792 792 if f == '.':
793 793 return False
794 794 if self._ignore(f):
795 795 return True
796 796 for p in util.finddirs(f):
797 797 if self._ignore(p):
798 798 return True
799 799 return False
800 800
801 801 def _ignorefiles(self):
802 802 files = []
803 803 if os.path.exists(self._join('.hgignore')):
804 804 files.append(self._join('.hgignore'))
805 805 for name, path in self._ui.configitems("ui"):
806 806 if name == 'ignore' or name.startswith('ignore.'):
807 807 # we need to use os.path.join here rather than self._join
808 808 # because path is arbitrary and user-specified
809 809 files.append(os.path.join(self._rootdir, util.expandpath(path)))
810 810 return files
811 811
812 812 def _ignorefileandline(self, f):
813 813 files = collections.deque(self._ignorefiles())
814 814 visited = set()
815 815 while files:
816 816 i = files.popleft()
817 817 patterns = matchmod.readpatternfile(i, self._ui.warn,
818 818 sourceinfo=True)
819 819 for pattern, lineno, line in patterns:
820 820 kind, p = matchmod._patsplit(pattern, 'glob')
821 821 if kind == "subinclude":
822 822 if p not in visited:
823 823 files.append(p)
824 824 continue
825 825 m = matchmod.match(self._root, '', [], [pattern],
826 826 warn=self._ui.warn)
827 827 if m(f):
828 828 return (i, lineno, line)
829 829 visited.add(i)
830 830 return (None, -1, "")
831 831
832 832 def _walkexplicit(self, match, subrepos):
833 833 '''Get stat data about the files explicitly specified by match.
834 834
835 835 Return a triple (results, dirsfound, dirsnotfound).
836 836 - results is a mapping from filename to stat result. It also contains
837 837 listings mapping subrepos and .hg to None.
838 838 - dirsfound is a list of files found to be directories.
839 839 - dirsnotfound is a list of files that the dirstate thinks are
840 840 directories and that were not found.'''
841 841
842 842 def badtype(mode):
843 843 kind = _('unknown')
844 844 if stat.S_ISCHR(mode):
845 845 kind = _('character device')
846 846 elif stat.S_ISBLK(mode):
847 847 kind = _('block device')
848 848 elif stat.S_ISFIFO(mode):
849 849 kind = _('fifo')
850 850 elif stat.S_ISSOCK(mode):
851 851 kind = _('socket')
852 852 elif stat.S_ISDIR(mode):
853 853 kind = _('directory')
854 854 return _('unsupported file type (type is %s)') % kind
855 855
856 856 matchedir = match.explicitdir
857 857 badfn = match.bad
858 858 dmap = self._map
859 859 lstat = os.lstat
860 860 getkind = stat.S_IFMT
861 861 dirkind = stat.S_IFDIR
862 862 regkind = stat.S_IFREG
863 863 lnkkind = stat.S_IFLNK
864 864 join = self._join
865 865 dirsfound = []
866 866 foundadd = dirsfound.append
867 867 dirsnotfound = []
868 868 notfoundadd = dirsnotfound.append
869 869
870 870 if not match.isexact() and self._checkcase:
871 871 normalize = self._normalize
872 872 else:
873 873 normalize = None
874 874
875 875 files = sorted(match.files())
876 876 subrepos.sort()
877 877 i, j = 0, 0
878 878 while i < len(files) and j < len(subrepos):
879 879 subpath = subrepos[j] + "/"
880 880 if files[i] < subpath:
881 881 i += 1
882 882 continue
883 883 while i < len(files) and files[i].startswith(subpath):
884 884 del files[i]
885 885 j += 1
886 886
887 887 if not files or '.' in files:
888 888 files = ['.']
889 889 results = dict.fromkeys(subrepos)
890 890 results['.hg'] = None
891 891
892 892 alldirs = None
893 893 for ff in files:
894 894 # constructing the foldmap is expensive, so don't do it for the
895 895 # common case where files is ['.']
896 896 if normalize and ff != '.':
897 897 nf = normalize(ff, False, True)
898 898 else:
899 899 nf = ff
900 900 if nf in results:
901 901 continue
902 902
903 903 try:
904 904 st = lstat(join(nf))
905 905 kind = getkind(st.st_mode)
906 906 if kind == dirkind:
907 907 if nf in dmap:
908 908 # file replaced by dir on disk but still in dirstate
909 909 results[nf] = None
910 910 if matchedir:
911 911 matchedir(nf)
912 912 foundadd((nf, ff))
913 913 elif kind == regkind or kind == lnkkind:
914 914 results[nf] = st
915 915 else:
916 916 badfn(ff, badtype(kind))
917 917 if nf in dmap:
918 918 results[nf] = None
919 919 except OSError as inst: # nf not found on disk - it is dirstate only
920 920 if nf in dmap: # does it exactly match a missing file?
921 921 results[nf] = None
922 922 else: # does it match a missing directory?
923 923 if alldirs is None:
924 924 alldirs = util.dirs(dmap._map)
925 925 if nf in alldirs:
926 926 if matchedir:
927 927 matchedir(nf)
928 928 notfoundadd(nf)
929 929 else:
930 930 badfn(ff, encoding.strtolocal(inst.strerror))
931 931
932 932 # Case insensitive filesystems cannot rely on lstat() failing to detect
933 933 # a case-only rename. Prune the stat object for any file that does not
934 934 # match the case in the filesystem, if there are multiple files that
935 935 # normalize to the same path.
936 936 if match.isexact() and self._checkcase:
937 937 normed = {}
938 938
939 939 for f, st in results.iteritems():
940 940 if st is None:
941 941 continue
942 942
943 943 nc = util.normcase(f)
944 944 paths = normed.get(nc)
945 945
946 946 if paths is None:
947 947 paths = set()
948 948 normed[nc] = paths
949 949
950 950 paths.add(f)
951 951
952 952 for norm, paths in normed.iteritems():
953 953 if len(paths) > 1:
954 954 for path in paths:
955 955 folded = self._discoverpath(path, norm, True, None,
956 956 self._dirfoldmap)
957 957 if path != folded:
958 958 results[path] = None
959 959
960 960 return results, dirsfound, dirsnotfound
961 961
962 962 def walk(self, match, subrepos, unknown, ignored, full=True):
963 963 '''
964 964 Walk recursively through the directory tree, finding all files
965 965 matched by match.
966 966
967 967 If full is False, maybe skip some known-clean files.
968 968
969 969 Return a dict mapping filename to stat-like object (either
970 970 mercurial.osutil.stat instance or return value of os.stat()).
971 971
972 972 '''
973 973 # full is a flag that extensions that hook into walk can use -- this
974 974 # implementation doesn't use it at all. This satisfies the contract
975 975 # because we only guarantee a "maybe".
976 976
977 977 if ignored:
978 978 ignore = util.never
979 979 dirignore = util.never
980 980 elif unknown:
981 981 ignore = self._ignore
982 982 dirignore = self._dirignore
983 983 else:
984 984 # if not unknown and not ignored, drop dir recursion and step 2
985 985 ignore = util.always
986 986 dirignore = util.always
987 987
988 988 matchfn = match.matchfn
989 989 matchalways = match.always()
990 990 matchtdir = match.traversedir
991 991 dmap = self._map
992 992 listdir = util.listdir
993 993 lstat = os.lstat
994 994 dirkind = stat.S_IFDIR
995 995 regkind = stat.S_IFREG
996 996 lnkkind = stat.S_IFLNK
997 997 join = self._join
998 998
999 999 exact = skipstep3 = False
1000 1000 if match.isexact(): # match.exact
1001 1001 exact = True
1002 1002 dirignore = util.always # skip step 2
1003 1003 elif match.prefix(): # match.match, no patterns
1004 1004 skipstep3 = True
1005 1005
1006 1006 if not exact and self._checkcase:
1007 1007 normalize = self._normalize
1008 1008 normalizefile = self._normalizefile
1009 1009 skipstep3 = False
1010 1010 else:
1011 1011 normalize = self._normalize
1012 1012 normalizefile = None
1013 1013
1014 1014 # step 1: find all explicit files
1015 1015 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1016 1016
1017 1017 skipstep3 = skipstep3 and not (work or dirsnotfound)
1018 1018 work = [d for d in work if not dirignore(d[0])]
1019 1019
1020 1020 # step 2: visit subdirectories
1021 1021 def traverse(work, alreadynormed):
1022 1022 wadd = work.append
1023 1023 while work:
1024 1024 nd = work.pop()
1025 1025 if not match.visitdir(nd):
1026 1026 continue
1027 1027 skip = None
1028 1028 if nd == '.':
1029 1029 nd = ''
1030 1030 else:
1031 1031 skip = '.hg'
1032 1032 try:
1033 1033 entries = listdir(join(nd), stat=True, skip=skip)
1034 1034 except OSError as inst:
1035 1035 if inst.errno in (errno.EACCES, errno.ENOENT):
1036 1036 match.bad(self.pathto(nd),
1037 1037 encoding.strtolocal(inst.strerror))
1038 1038 continue
1039 1039 raise
1040 1040 for f, kind, st in entries:
1041 1041 if normalizefile:
1042 1042 # even though f might be a directory, we're only
1043 1043 # interested in comparing it to files currently in the
1044 1044 # dmap -- therefore normalizefile is enough
1045 1045 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1046 1046 True)
1047 1047 else:
1048 1048 nf = nd and (nd + "/" + f) or f
1049 1049 if nf not in results:
1050 1050 if kind == dirkind:
1051 1051 if not ignore(nf):
1052 1052 if matchtdir:
1053 1053 matchtdir(nf)
1054 1054 wadd(nf)
1055 1055 if nf in dmap and (matchalways or matchfn(nf)):
1056 1056 results[nf] = None
1057 1057 elif kind == regkind or kind == lnkkind:
1058 1058 if nf in dmap:
1059 1059 if matchalways or matchfn(nf):
1060 1060 results[nf] = st
1061 1061 elif ((matchalways or matchfn(nf))
1062 1062 and not ignore(nf)):
1063 1063 # unknown file -- normalize if necessary
1064 1064 if not alreadynormed:
1065 1065 nf = normalize(nf, False, True)
1066 1066 results[nf] = st
1067 1067 elif nf in dmap and (matchalways or matchfn(nf)):
1068 1068 results[nf] = None
1069 1069
1070 1070 for nd, d in work:
1071 1071 # alreadynormed means that processwork doesn't have to do any
1072 1072 # expensive directory normalization
1073 1073 alreadynormed = not normalize or nd == d
1074 1074 traverse([d], alreadynormed)
1075 1075
1076 1076 for s in subrepos:
1077 1077 del results[s]
1078 1078 del results['.hg']
1079 1079
1080 1080 # step 3: visit remaining files from dmap
1081 1081 if not skipstep3 and not exact:
1082 1082 # If a dmap file is not in results yet, it was either
1083 1083 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1084 1084 # symlink directory.
1085 1085 if not results and matchalways:
1086 1086 visit = [f for f in dmap]
1087 1087 else:
1088 1088 visit = [f for f in dmap if f not in results and matchfn(f)]
1089 1089 visit.sort()
1090 1090
1091 1091 if unknown:
1092 1092 # unknown == True means we walked all dirs under the roots
1093 1093 # that weren't ignored, and everything that matched was stat'ed
1094 1094 # and is already in results.
1095 1095 # The rest must thus be ignored or under a symlink.
1096 1096 audit_path = pathutil.pathauditor(self._root, cached=True)
1097 1097
1098 1098 for nf in iter(visit):
1099 1099 # If a stat for the same file was already added with a
1100 1100 # different case, don't add one for this, since that would
1101 1101 # make it appear as if the file exists under both names
1102 1102 # on disk.
1103 1103 if (normalizefile and
1104 1104 normalizefile(nf, True, True) in results):
1105 1105 results[nf] = None
1106 1106 # Report ignored items in the dmap as long as they are not
1107 1107 # under a symlink directory.
1108 1108 elif audit_path.check(nf):
1109 1109 try:
1110 1110 results[nf] = lstat(join(nf))
1111 1111 # file was just ignored, no links, and exists
1112 1112 except OSError:
1113 1113 # file doesn't exist
1114 1114 results[nf] = None
1115 1115 else:
1116 1116 # It's either missing or under a symlink directory
1117 1117 # which we in this case report as missing
1118 1118 results[nf] = None
1119 1119 else:
1120 1120 # We may not have walked the full directory tree above,
1121 1121 # so stat and check everything we missed.
1122 1122 iv = iter(visit)
1123 1123 for st in util.statfiles([join(i) for i in visit]):
1124 1124 results[next(iv)] = st
1125 1125 return results
1126 1126
1127 1127 def status(self, match, subrepos, ignored, clean, unknown):
1128 1128 '''Determine the status of the working copy relative to the
1129 1129 dirstate and return a pair of (unsure, status), where status is of type
1130 1130 scmutil.status and:
1131 1131
1132 1132 unsure:
1133 1133 files that might have been modified since the dirstate was
1134 1134 written, but need to be read to be sure (size is the same
1135 1135 but mtime differs)
1136 1136 status.modified:
1137 1137 files that have definitely been modified since the dirstate
1138 1138 was written (different size or mode)
1139 1139 status.clean:
1140 1140 files that have definitely not been modified since the
1141 1141 dirstate was written
1142 1142 '''
1143 1143 listignored, listclean, listunknown = ignored, clean, unknown
1144 1144 lookup, modified, added, unknown, ignored = [], [], [], [], []
1145 1145 removed, deleted, clean = [], [], []
1146 1146
1147 1147 dmap = self._map
1148 1148 ladd = lookup.append # aka "unsure"
1149 1149 madd = modified.append
1150 1150 aadd = added.append
1151 1151 uadd = unknown.append
1152 1152 iadd = ignored.append
1153 1153 radd = removed.append
1154 1154 dadd = deleted.append
1155 1155 cadd = clean.append
1156 1156 mexact = match.exact
1157 1157 dirignore = self._dirignore
1158 1158 checkexec = self._checkexec
1159 1159 copymap = self._map.copymap
1160 1160 lastnormaltime = self._lastnormaltime
1161 1161
1162 1162 # We need to do full walks when either
1163 1163 # - we're listing all clean files, or
1164 1164 # - match.traversedir does something, because match.traversedir should
1165 1165 # be called for every dir in the working dir
1166 1166 full = listclean or match.traversedir is not None
1167 1167 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1168 1168 full=full).iteritems():
1169 1169 if fn not in dmap:
1170 1170 if (listignored or mexact(fn)) and dirignore(fn):
1171 1171 if listignored:
1172 1172 iadd(fn)
1173 1173 else:
1174 1174 uadd(fn)
1175 1175 continue
1176 1176
1177 1177 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1178 1178 # written like that for performance reasons. dmap[fn] is not a
1179 1179 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1180 1180 # opcode has fast paths when the value to be unpacked is a tuple or
1181 1181 # a list, but falls back to creating a full-fledged iterator in
1182 1182 # general. That is much slower than simply accessing and storing the
1183 1183 # tuple members one by one.
1184 1184 t = dmap[fn]
1185 1185 state = t[0]
1186 1186 mode = t[1]
1187 1187 size = t[2]
1188 1188 time = t[3]
1189 1189
1190 1190 if not st and state in "nma":
1191 1191 dadd(fn)
1192 1192 elif state == 'n':
1193 1193 if (size >= 0 and
1194 1194 ((size != st.st_size and size != st.st_size & _rangemask)
1195 1195 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1196 1196 or size == -2 # other parent
1197 1197 or fn in copymap):
1198 1198 madd(fn)
1199 1199 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1200 1200 ladd(fn)
1201 1201 elif st.st_mtime == lastnormaltime:
1202 1202 # fn may have just been marked as normal and it may have
1203 1203 # changed in the same second without changing its size.
1204 1204 # This can happen if we quickly do multiple commits.
1205 1205 # Force lookup, so we don't miss such a racy file change.
1206 1206 ladd(fn)
1207 1207 elif listclean:
1208 1208 cadd(fn)
1209 1209 elif state == 'm':
1210 1210 madd(fn)
1211 1211 elif state == 'a':
1212 1212 aadd(fn)
1213 1213 elif state == 'r':
1214 1214 radd(fn)
1215 1215
1216 1216 return (lookup, scmutil.status(modified, added, removed, deleted,
1217 1217 unknown, ignored, clean))
1218 1218
1219 1219 def matches(self, match):
1220 1220 '''
1221 1221 return files in the dirstate (in whatever state) filtered by match
1222 1222 '''
1223 1223 dmap = self._map
1224 1224 if match.always():
1225 1225 return dmap.keys()
1226 1226 files = match.files()
1227 1227 if match.isexact():
1228 1228 # fast path -- filter the other way around, since typically files is
1229 1229 # much smaller than dmap
1230 1230 return [f for f in files if f in dmap]
1231 1231 if match.prefix() and all(fn in dmap for fn in files):
1232 1232 # fast path -- all the values are known to be files, so just return
1233 1233 # that
1234 1234 return list(files)
1235 1235 return [f for f in dmap if match(f)]
1236 1236
1237 1237 def _actualfilename(self, tr):
1238 1238 if tr:
1239 1239 return self._pendingfilename
1240 1240 else:
1241 1241 return self._filename
1242 1242
1243 1243 def savebackup(self, tr, backupname):
1244 1244 '''Save current dirstate into backup file'''
1245 1245 filename = self._actualfilename(tr)
1246 1246 assert backupname != filename
1247 1247
1248 1248 # use '_writedirstate' instead of 'write' to make sure changes are
1249 1249 # written out, because the latter skips writing while a transaction runs.
1250 1250 # The output file will be used to create a backup of the dirstate here.
1251 1251 if self._dirty or not self._opener.exists(filename):
1252 1252 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1253 1253 checkambig=True))
1254 1254
1255 1255 if tr:
1256 1256 # ensure that subsequent tr.writepending returns True for
1257 1257 # changes written out above, even if dirstate is never
1258 1258 # changed after this
1259 1259 tr.addfilegenerator('dirstate', (self._filename,),
1260 1260 self._writedirstate, location='plain')
1261 1261
1262 1262 # ensure that pending file written above is unlinked at
1263 1263 # failure, even if tr.writepending isn't invoked until the
1264 1264 # end of this transaction
1265 1265 tr.registertmp(filename, location='plain')
1266 1266
1267 1267 self._opener.tryunlink(backupname)
1268 1268 # hardlink backup is okay because _writedirstate is always called
1269 1269 # with an "atomictemp=True" file.
1270 1270 util.copyfile(self._opener.join(filename),
1271 1271 self._opener.join(backupname), hardlink=True)
1272 1272
1273 1273 def restorebackup(self, tr, backupname):
1274 1274 '''Restore dirstate by backup file'''
1275 1275 # this "invalidate()" prevents "wlock.release()" from writing
1276 1276 # changes of dirstate out after restoring from backup file
1277 1277 self.invalidate()
1278 1278 filename = self._actualfilename(tr)
1279 1279 self._opener.rename(backupname, filename, checkambig=True)
1280 1280
1281 1281 def clearbackup(self, tr, backupname):
1282 1282 '''Clear backup file'''
1283 1283 self._opener.unlink(backupname)
1284 1284
1285 1285 class dirstatemap(object):
1286 1286 def __init__(self, ui, opener, root):
1287 1287 self._ui = ui
1288 1288 self._opener = opener
1289 1289 self._root = root
1290 1290 self._filename = 'dirstate'
1291 1291
1292 1292 self._map = {}
1293 1293 self.copymap = {}
1294 1294 self._parents = None
1295 1295 self._dirtyparents = False
1296 1296
1297 1297 # for consistent view between _pl() and _read() invocations
1298 1298 self._pendingmode = None
1299 1299
1300 1300 def iteritems(self):
1301 1301 return self._map.iteritems()
1302 1302
1303 def __len__(self):
1304 return len(self._map)
1305
1303 1306 def __iter__(self):
1304 1307 return iter(self._map)
1305 1308
1306 1309 def get(self, key, default=None):
1307 1310 return self._map.get(key, default)
1308 1311
1309 1312 def __contains__(self, key):
1310 1313 return key in self._map
1311 1314
1312 1315 def __setitem__(self, key, value):
1313 1316 self._map[key] = value
1314 1317
1315 1318 def __getitem__(self, key):
1316 1319 return self._map[key]
1317 1320
1318 1321 def __delitem__(self, key):
1319 1322 del self._map[key]
1320 1323
1321 1324 def keys(self):
1322 1325 return self._map.keys()
1323 1326
1324 1327 def nonnormalentries(self):
1325 1328 '''Compute the nonnormal dirstate entries from the dmap'''
1326 1329 try:
1327 1330 return parsers.nonnormalotherparententries(self._map)
1328 1331 except AttributeError:
1329 1332 nonnorm = set()
1330 1333 otherparent = set()
1331 1334 for fname, e in self._map.iteritems():
1332 1335 if e[0] != 'n' or e[3] == -1:
1333 1336 nonnorm.add(fname)
1334 1337 if e[0] == 'n' and e[2] == -2:
1335 1338 otherparent.add(fname)
1336 1339 return nonnorm, otherparent
1337 1340
1338 1341 def filefoldmap(self):
1339 1342 """Returns a dictionary mapping normalized case paths to their
1340 1343 non-normalized versions.
1341 1344 """
1342 1345 try:
1343 1346 makefilefoldmap = parsers.make_file_foldmap
1344 1347 except AttributeError:
1345 1348 pass
1346 1349 else:
1347 1350 return makefilefoldmap(self._map, util.normcasespec,
1348 1351 util.normcasefallback)
1349 1352
1350 1353 f = {}
1351 1354 normcase = util.normcase
1352 1355 for name, s in self._map.iteritems():
1353 1356 if s[0] != 'r':
1354 1357 f[normcase(name)] = name
1355 1358 f['.'] = '.' # prevents useless util.fspath() invocation
1356 1359 return f
1357 1360
1358 1361 def dirs(self):
1359 1362 """Returns a set-like object containing all the directories in the
1360 1363 current dirstate.
1361 1364 """
1362 1365 return util.dirs(self._map, 'r')
1363 1366
1364 1367 def _opendirstatefile(self):
1365 1368 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1366 1369 if self._pendingmode is not None and self._pendingmode != mode:
1367 1370 fp.close()
1368 1371 raise error.Abort(_('working directory state may be '
1369 1372 'changed in parallel'))
1370 1373 self._pendingmode = mode
1371 1374 return fp
1372 1375
1373 1376 def parents(self):
1374 1377 if not self._parents:
1375 1378 try:
1376 1379 fp = self._opendirstatefile()
1377 1380 st = fp.read(40)
1378 1381 fp.close()
1379 1382 except IOError as err:
1380 1383 if err.errno != errno.ENOENT:
1381 1384 raise
1382 1385 # File doesn't exist, so the current state is empty
1383 1386 st = ''
1384 1387
1385 1388 l = len(st)
1386 1389 if l == 40:
1387 1390 self._parents = st[:20], st[20:40]
1388 1391 elif l == 0:
1389 1392 self._parents = [nullid, nullid]
1390 1393 else:
1391 1394 raise error.Abort(_('working directory state appears '
1392 1395 'damaged!'))
1393 1396
1394 1397 return self._parents
1395 1398
1396 1399 def setparents(self, p1, p2):
1397 1400 self._parents = (p1, p2)
1398 1401 self._dirtyparents = True
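
With the __len__ defined above, len() works on dirstate._map (a dirstatemap instance, as constructed in _read() and clear()) and reports the number of tracked entries. A hedged usage sketch; the helper name below is hypothetical and not part of this changeset:

    # Hypothetical caller: count dirstate entries via the new __len__
    # instead of reaching into the private backing dict (dirstatemap._map).
    def entrycount(ds):
        # ds is a dirstate object; ds._map is its dirstatemap
        return len(ds._map)   # delegates to dirstatemap.__len__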