dirstate: move opendirstatefile to dirstatemap...
Durham Goode
r34338:c36c3fa7 default
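This change is a pure refactor: the pending-aware file opener _opendirstatefile() and its _pendingmode bookkeeping move from the dirstate class into dirstatemap, whose constructor now takes ui, opener and root so the map can open its own backing file. A minimal sketch of the call-site effect, distilled from the diff below rather than quoted from it verbatim:

    # before this change: dirstate opened the file itself
    fp = self._opendirstatefile()

    # after this change: the map owns the file handle and the pending-mode check
    self._map = dirstatemap(self._ui, self._opener, self._root)
    fp = self._map._opendirstatefile()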
@@ -1,1377 +1,1383 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
71 71 # a UNC path pointing to a root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._dirtypl = False
75 75 self._lastnormaltime = 0
76 76 self._ui = ui
77 77 self._filecache = {}
78 78 self._parentwriters = 0
79 79 self._filename = 'dirstate'
80 80 self._pendingfilename = '%s.pending' % self._filename
81 81 self._plchangecallbacks = {}
82 82 self._origpl = None
83 83 self._updatedfiles = set()
84 84
85 # for consistent view between _pl() and _read() invocations
86 self._pendingmode = None
87
88 85 @contextlib.contextmanager
89 86 def parentchange(self):
90 87 '''Context manager for handling dirstate parents.
91 88
92 89 If an exception occurs in the scope of the context manager,
93 90 the incoherent dirstate won't be written when wlock is
94 91 released.
95 92 '''
96 93 self._parentwriters += 1
97 94 yield
98 95 # Typically we want the "undo" step of a context manager in a
99 96 # finally block so it happens even when an exception
100 97 # occurs. In this case, however, we only want to decrement
101 98 # parentwriters if the code in the with statement exits
102 99 # normally, so we don't have a try/finally here on purpose.
103 100 self._parentwriters -= 1
104 101
105 102 def beginparentchange(self):
106 103 '''Marks the beginning of a set of changes that involve changing
107 104 the dirstate parents. If there is an exception during this time,
108 105 the dirstate will not be written when the wlock is released. This
109 106 prevents writing an incoherent dirstate where the parent doesn't
110 107 match the contents.
111 108 '''
112 109 self._ui.deprecwarn('beginparentchange is obsoleted by the '
113 110 'parentchange context manager.', '4.3')
114 111 self._parentwriters += 1
115 112
116 113 def endparentchange(self):
117 114 '''Marks the end of a set of changes that involve changing the
118 115 dirstate parents. Once all parent changes have been marked done,
119 116 the wlock will be free to write the dirstate on release.
120 117 '''
121 118 self._ui.deprecwarn('endparentchange is obsoleted by the '
122 119 'parentchange context manager.', '4.3')
123 120 if self._parentwriters > 0:
124 121 self._parentwriters -= 1
125 122
126 123 def pendingparentchange(self):
127 124 '''Returns true if the dirstate is in the middle of a set of changes
128 125 that modify the dirstate parent.
129 126 '''
130 127 return self._parentwriters > 0
131 128
132 129 @propertycache
133 130 def _map(self):
134 131 '''Return the dirstate contents as a map from filename to
135 132 (state, mode, size, time).'''
136 133 self._read()
137 134 return self._map
138 135
139 136 @propertycache
140 137 def _identity(self):
141 138 self._read()
142 139 return self._identity
143 140
144 141 @propertycache
145 142 def _nonnormalset(self):
146 143 nonnorm, otherparents = self._map.nonnormalentries()
147 144 self._otherparentset = otherparents
148 145 return nonnorm
149 146
150 147 @propertycache
151 148 def _otherparentset(self):
152 149 nonnorm, otherparents = self._map.nonnormalentries()
153 150 self._nonnormalset = nonnorm
154 151 return otherparents
155 152
156 153 @propertycache
157 154 def _filefoldmap(self):
158 155 return self._map.filefoldmap()
159 156
160 157 @propertycache
161 158 def _dirfoldmap(self):
162 159 f = {}
163 160 normcase = util.normcase
164 161 for name in self._dirs:
165 162 f[normcase(name)] = name
166 163 return f
167 164
168 165 @property
169 166 def _sparsematcher(self):
170 167 """The matcher for the sparse checkout.
171 168
172 169 The working directory may not include every file from a manifest. The
173 170 matcher obtained by this property will match a path if it is to be
174 171 included in the working directory.
175 172 """
176 173 # TODO there is potential to cache this property. For now, the matcher
177 174 # is resolved on every access. (But the called function does use a
178 175 # cache to keep the lookup fast.)
179 176 return self._sparsematchfn()
180 177
181 178 @repocache('branch')
182 179 def _branch(self):
183 180 try:
184 181 return self._opener.read("branch").strip() or "default"
185 182 except IOError as inst:
186 183 if inst.errno != errno.ENOENT:
187 184 raise
188 185 return "default"
189 186
190 187 @propertycache
191 188 def _pl(self):
192 189 try:
193 fp = self._opendirstatefile()
190 fp = self._map._opendirstatefile()
194 191 st = fp.read(40)
195 192 fp.close()
196 193 l = len(st)
197 194 if l == 40:
198 195 return st[:20], st[20:40]
199 196 elif l > 0 and l < 40:
200 197 raise error.Abort(_('working directory state appears damaged!'))
201 198 except IOError as err:
202 199 if err.errno != errno.ENOENT:
203 200 raise
204 201 return [nullid, nullid]
205 202
206 203 @propertycache
207 204 def _dirs(self):
208 205 return self._map.dirs()
209 206
210 207 def dirs(self):
211 208 return self._dirs
212 209
213 210 @rootcache('.hgignore')
214 211 def _ignore(self):
215 212 files = self._ignorefiles()
216 213 if not files:
217 214 return matchmod.never(self._root, '')
218 215
219 216 pats = ['include:%s' % f for f in files]
220 217 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
221 218
222 219 @propertycache
223 220 def _slash(self):
224 221 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
225 222
226 223 @propertycache
227 224 def _checklink(self):
228 225 return util.checklink(self._root)
229 226
230 227 @propertycache
231 228 def _checkexec(self):
232 229 return util.checkexec(self._root)
233 230
234 231 @propertycache
235 232 def _checkcase(self):
236 233 return not util.fscasesensitive(self._join('.hg'))
237 234
238 235 def _join(self, f):
239 236 # much faster than os.path.join()
240 237 # it's safe because f is always a relative path
241 238 return self._rootdir + f
242 239
243 240 def flagfunc(self, buildfallback):
244 241 if self._checklink and self._checkexec:
245 242 def f(x):
246 243 try:
247 244 st = os.lstat(self._join(x))
248 245 if util.statislink(st):
249 246 return 'l'
250 247 if util.statisexec(st):
251 248 return 'x'
252 249 except OSError:
253 250 pass
254 251 return ''
255 252 return f
256 253
257 254 fallback = buildfallback()
258 255 if self._checklink:
259 256 def f(x):
260 257 if os.path.islink(self._join(x)):
261 258 return 'l'
262 259 if 'x' in fallback(x):
263 260 return 'x'
264 261 return ''
265 262 return f
266 263 if self._checkexec:
267 264 def f(x):
268 265 if 'l' in fallback(x):
269 266 return 'l'
270 267 if util.isexec(self._join(x)):
271 268 return 'x'
272 269 return ''
273 270 return f
274 271 else:
275 272 return fallback
276 273
277 274 @propertycache
278 275 def _cwd(self):
279 276 # internal config: ui.forcecwd
280 277 forcecwd = self._ui.config('ui', 'forcecwd')
281 278 if forcecwd:
282 279 return forcecwd
283 280 return pycompat.getcwd()
284 281
285 282 def getcwd(self):
286 283 '''Return the path from which a canonical path is calculated.
287 284
288 285 This path should be used to resolve file patterns or to convert
289 286 canonical paths back to file paths for display. It shouldn't be
290 287 used to get real file paths. Use vfs functions instead.
291 288 '''
292 289 cwd = self._cwd
293 290 if cwd == self._root:
294 291 return ''
295 292 # self._root ends with a path separator if self._root is '/' or 'C:\'
296 293 rootsep = self._root
297 294 if not util.endswithsep(rootsep):
298 295 rootsep += pycompat.ossep
299 296 if cwd.startswith(rootsep):
300 297 return cwd[len(rootsep):]
301 298 else:
302 299 # we're outside the repo. return an absolute path.
303 300 return cwd
304 301
305 302 def pathto(self, f, cwd=None):
306 303 if cwd is None:
307 304 cwd = self.getcwd()
308 305 path = util.pathto(self._root, cwd, f)
309 306 if self._slash:
310 307 return util.pconvert(path)
311 308 return path
312 309
313 310 def __getitem__(self, key):
314 311 '''Return the current state of key (a filename) in the dirstate.
315 312
316 313 States are:
317 314 n normal
318 315 m needs merging
319 316 r marked for removal
320 317 a marked for addition
321 318 ? not tracked
322 319 '''
323 320 return self._map.get(key, ("?",))[0]
324 321
325 322 def __contains__(self, key):
326 323 return key in self._map
327 324
328 325 def __iter__(self):
329 326 return iter(sorted(self._map))
330 327
331 328 def items(self):
332 329 return self._map.iteritems()
333 330
334 331 iteritems = items
335 332
336 333 def parents(self):
337 334 return [self._validate(p) for p in self._pl]
338 335
339 336 def p1(self):
340 337 return self._validate(self._pl[0])
341 338
342 339 def p2(self):
343 340 return self._validate(self._pl[1])
344 341
345 342 def branch(self):
346 343 return encoding.tolocal(self._branch)
347 344
348 345 def setparents(self, p1, p2=nullid):
349 346 """Set dirstate parents to p1 and p2.
350 347
351 348 When moving from two parents to one, 'm' merged entries are
352 349 adjusted to normal and previous copy records are discarded and
353 350 returned by the call.
354 351
355 352 See localrepo.setparents()
356 353 """
357 354 if self._parentwriters == 0:
358 355 raise ValueError("cannot set dirstate parent without "
359 356 "calling dirstate.beginparentchange")
360 357
361 358 self._dirty = self._dirtypl = True
362 359 oldp2 = self._pl[1]
363 360 if self._origpl is None:
364 361 self._origpl = self._pl
365 362 self._pl = p1, p2
366 363 copies = {}
367 364 if oldp2 != nullid and p2 == nullid:
368 365 candidatefiles = self._nonnormalset.union(self._otherparentset)
369 366 for f in candidatefiles:
370 367 s = self._map.get(f)
371 368 if s is None:
372 369 continue
373 370
374 371 # Discard 'm' markers when moving away from a merge state
375 372 if s[0] == 'm':
376 373 source = self._map.copymap.get(f)
377 374 if source:
378 375 copies[f] = source
379 376 self.normallookup(f)
380 377 # Also fix up otherparent markers
381 378 elif s[0] == 'n' and s[2] == -2:
382 379 source = self._map.copymap.get(f)
383 380 if source:
384 381 copies[f] = source
385 382 self.add(f)
386 383 return copies
387 384
388 385 def setbranch(self, branch):
389 386 self._branch = encoding.fromlocal(branch)
390 387 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
391 388 try:
392 389 f.write(self._branch + '\n')
393 390 f.close()
394 391
395 392 # make sure filecache has the correct stat info for _branch after
396 393 # replacing the underlying file
397 394 ce = self._filecache['_branch']
398 395 if ce:
399 396 ce.refresh()
400 397 except: # re-raises
401 398 f.discard()
402 399 raise
403 400
404 def _opendirstatefile(self):
405 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
406 if self._pendingmode is not None and self._pendingmode != mode:
407 fp.close()
408 raise error.Abort(_('working directory state may be '
409 'changed parallelly'))
410 self._pendingmode = mode
411 return fp
412
413 401 def _read(self):
414 self._map = dirstatemap()
402 self._map = dirstatemap(self._ui, self._opener, self._root)
415 403
416 404 # ignore HG_PENDING because identity is used only for writing
417 405 self._identity = util.filestat.frompath(
418 406 self._opener.join(self._filename))
419 407 try:
420 fp = self._opendirstatefile()
408 fp = self._map._opendirstatefile()
421 409 try:
422 410 st = fp.read()
423 411 finally:
424 412 fp.close()
425 413 except IOError as err:
426 414 if err.errno != errno.ENOENT:
427 415 raise
428 416 return
429 417 if not st:
430 418 return
431 419
432 420 if util.safehasattr(parsers, 'dict_new_presized'):
433 421 # Make an estimate of the number of files in the dirstate based on
434 422 # its size. From a linear regression on a set of real-world repos,
435 423 # all over 10,000 files, the size of a dirstate entry is 85
436 424 # bytes. The cost of resizing is significantly higher than the cost
437 425 # of filling in a larger presized dict, so subtract 20% from the
438 426 # size.
439 427 #
440 428 # This heuristic is imperfect in many ways, so in a future dirstate
441 429 # format update it makes sense to just record the number of entries
442 430 # on write.
443 431 self._map._map = parsers.dict_new_presized(len(st) / 71)
444 432
445 433 # Python's garbage collector triggers a GC each time a certain number
446 434 # of container objects (the number being defined by
447 435 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
448 436 # for each file in the dirstate. The C version then immediately marks
449 437 # them as not to be tracked by the collector. However, this has no
450 438 # effect on when GCs are triggered, only on what objects the GC looks
451 439 # into. This means that O(number of files) GCs are unavoidable.
452 440 # Depending on when in the process's lifetime the dirstate is parsed,
453 441 # this can get very expensive. As a workaround, disable GC while
454 442 # parsing the dirstate.
455 443 #
456 444 # (we cannot decorate the function directly since it is in a C module)
457 445 parse_dirstate = util.nogc(parsers.parse_dirstate)
458 446 p = parse_dirstate(self._map._map, self._map.copymap, st)
459 447 if not self._dirtypl:
460 448 self._pl = p
461 449
462 450 def invalidate(self):
463 451 '''Causes the next access to reread the dirstate.
464 452
465 453 This is different from localrepo.invalidatedirstate() because it always
466 454 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
467 455 check whether the dirstate has changed before rereading it.'''
468 456
469 457 for a in ("_map", "_identity",
470 458 "_filefoldmap", "_dirfoldmap", "_branch",
471 459 "_pl", "_dirs", "_ignore", "_nonnormalset",
472 460 "_otherparentset"):
473 461 if a in self.__dict__:
474 462 delattr(self, a)
475 463 self._lastnormaltime = 0
476 464 self._dirty = False
477 465 self._updatedfiles.clear()
478 466 self._parentwriters = 0
479 467 self._origpl = None
480 468
481 469 def copy(self, source, dest):
482 470 """Mark dest as a copy of source. Unmark dest if source is None."""
483 471 if source == dest:
484 472 return
485 473 self._dirty = True
486 474 if source is not None:
487 475 self._map.copymap[dest] = source
488 476 self._updatedfiles.add(source)
489 477 self._updatedfiles.add(dest)
490 478 elif self._map.copymap.pop(dest, None):
491 479 self._updatedfiles.add(dest)
492 480
493 481 def copied(self, file):
494 482 return self._map.copymap.get(file, None)
495 483
496 484 def copies(self):
497 485 return self._map.copymap
498 486
499 487 def _droppath(self, f):
500 488 if self[f] not in "?r" and "_dirs" in self.__dict__:
501 489 self._dirs.delpath(f)
502 490
503 491 if "_filefoldmap" in self.__dict__:
504 492 normed = util.normcase(f)
505 493 if normed in self._filefoldmap:
506 494 del self._filefoldmap[normed]
507 495
508 496 self._updatedfiles.add(f)
509 497
510 498 def _addpath(self, f, state, mode, size, mtime):
511 499 oldstate = self[f]
512 500 if state == 'a' or oldstate == 'r':
513 501 scmutil.checkfilename(f)
514 502 if f in self._dirs:
515 503 raise error.Abort(_('directory %r already in dirstate') % f)
516 504 # shadows
517 505 for d in util.finddirs(f):
518 506 if d in self._dirs:
519 507 break
520 508 entry = self._map.get(d)
521 509 if entry is not None and entry[0] != 'r':
522 510 raise error.Abort(
523 511 _('file %r in dirstate clashes with %r') % (d, f))
524 512 if oldstate in "?r" and "_dirs" in self.__dict__:
525 513 self._dirs.addpath(f)
526 514 self._dirty = True
527 515 self._updatedfiles.add(f)
528 516 self._map[f] = dirstatetuple(state, mode, size, mtime)
529 517 if state != 'n' or mtime == -1:
530 518 self._nonnormalset.add(f)
531 519 if size == -2:
532 520 self._otherparentset.add(f)
533 521
534 522 def normal(self, f):
535 523 '''Mark a file normal and clean.'''
536 524 s = os.lstat(self._join(f))
537 525 mtime = s.st_mtime
538 526 self._addpath(f, 'n', s.st_mode,
539 527 s.st_size & _rangemask, mtime & _rangemask)
540 528 self._map.copymap.pop(f, None)
541 529 if f in self._nonnormalset:
542 530 self._nonnormalset.remove(f)
543 531 if mtime > self._lastnormaltime:
544 532 # Remember the most recent modification timeslot for status(),
545 533 # to make sure we won't miss future size-preserving file content
546 534 # modifications that happen within the same timeslot.
547 535 self._lastnormaltime = mtime
548 536
549 537 def normallookup(self, f):
550 538 '''Mark a file normal, but possibly dirty.'''
551 539 if self._pl[1] != nullid:
552 540 # if there is a merge going on and the file was either
553 541 # in state 'm' (-1) or coming from other parent (-2) before
554 542 # being removed, restore that state.
555 543 entry = self._map.get(f)
556 544 if entry is not None:
557 545 if entry[0] == 'r' and entry[2] in (-1, -2):
558 546 source = self._map.copymap.get(f)
559 547 if entry[2] == -1:
560 548 self.merge(f)
561 549 elif entry[2] == -2:
562 550 self.otherparent(f)
563 551 if source:
564 552 self.copy(source, f)
565 553 return
566 554 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
567 555 return
568 556 self._addpath(f, 'n', 0, -1, -1)
569 557 self._map.copymap.pop(f, None)
570 558 if f in self._nonnormalset:
571 559 self._nonnormalset.remove(f)
572 560
573 561 def otherparent(self, f):
574 562 '''Mark as coming from the other parent, always dirty.'''
575 563 if self._pl[1] == nullid:
576 564 raise error.Abort(_("setting %r to other parent "
577 565 "only allowed in merges") % f)
578 566 if f in self and self[f] == 'n':
579 567 # merge-like
580 568 self._addpath(f, 'm', 0, -2, -1)
581 569 else:
582 570 # add-like
583 571 self._addpath(f, 'n', 0, -2, -1)
584 572 self._map.copymap.pop(f, None)
585 573
586 574 def add(self, f):
587 575 '''Mark a file added.'''
588 576 self._addpath(f, 'a', 0, -1, -1)
589 577 self._map.copymap.pop(f, None)
590 578
591 579 def remove(self, f):
592 580 '''Mark a file removed.'''
593 581 self._dirty = True
594 582 self._droppath(f)
595 583 size = 0
596 584 if self._pl[1] != nullid:
597 585 entry = self._map.get(f)
598 586 if entry is not None:
599 587 # backup the previous state
600 588 if entry[0] == 'm': # merge
601 589 size = -1
602 590 elif entry[0] == 'n' and entry[2] == -2: # other parent
603 591 size = -2
604 592 self._otherparentset.add(f)
605 593 self._map[f] = dirstatetuple('r', 0, size, 0)
606 594 self._nonnormalset.add(f)
607 595 if size == 0:
608 596 self._map.copymap.pop(f, None)
609 597
610 598 def merge(self, f):
611 599 '''Mark a file merged.'''
612 600 if self._pl[1] == nullid:
613 601 return self.normallookup(f)
614 602 return self.otherparent(f)
615 603
616 604 def drop(self, f):
617 605 '''Drop a file from the dirstate'''
618 606 if f in self._map:
619 607 self._dirty = True
620 608 self._droppath(f)
621 609 del self._map[f]
622 610 if f in self._nonnormalset:
623 611 self._nonnormalset.remove(f)
624 612 self._map.copymap.pop(f, None)
625 613
626 614 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
627 615 if exists is None:
628 616 exists = os.path.lexists(os.path.join(self._root, path))
629 617 if not exists:
630 618 # Maybe a path component exists
631 619 if not ignoremissing and '/' in path:
632 620 d, f = path.rsplit('/', 1)
633 621 d = self._normalize(d, False, ignoremissing, None)
634 622 folded = d + "/" + f
635 623 else:
636 624 # No path components, preserve original case
637 625 folded = path
638 626 else:
639 627 # recursively normalize leading directory components
640 628 # against dirstate
641 629 if '/' in normed:
642 630 d, f = normed.rsplit('/', 1)
643 631 d = self._normalize(d, False, ignoremissing, True)
644 632 r = self._root + "/" + d
645 633 folded = d + "/" + util.fspath(f, r)
646 634 else:
647 635 folded = util.fspath(normed, self._root)
648 636 storemap[normed] = folded
649 637
650 638 return folded
651 639
652 640 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
653 641 normed = util.normcase(path)
654 642 folded = self._filefoldmap.get(normed, None)
655 643 if folded is None:
656 644 if isknown:
657 645 folded = path
658 646 else:
659 647 folded = self._discoverpath(path, normed, ignoremissing, exists,
660 648 self._filefoldmap)
661 649 return folded
662 650
663 651 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
664 652 normed = util.normcase(path)
665 653 folded = self._filefoldmap.get(normed, None)
666 654 if folded is None:
667 655 folded = self._dirfoldmap.get(normed, None)
668 656 if folded is None:
669 657 if isknown:
670 658 folded = path
671 659 else:
672 660 # store discovered result in dirfoldmap so that future
673 661 # normalizefile calls don't start matching directories
674 662 folded = self._discoverpath(path, normed, ignoremissing, exists,
675 663 self._dirfoldmap)
676 664 return folded
677 665
678 666 def normalize(self, path, isknown=False, ignoremissing=False):
679 667 '''
680 668 normalize the case of a pathname when on a casefolding filesystem
681 669
682 670 isknown specifies whether the filename came from walking the
683 671 disk, to avoid extra filesystem access.
684 672
685 673 If ignoremissing is True, missing paths are returned
686 674 unchanged. Otherwise, we try harder to normalize possibly
687 675 existing path components.
688 676
689 677 The normalized case is determined based on the following precedence:
690 678
691 679 - version of name already stored in the dirstate
692 680 - version of name stored on disk
693 681 - version provided via command arguments
694 682 '''
695 683
696 684 if self._checkcase:
697 685 return self._normalize(path, isknown, ignoremissing)
698 686 return path
699 687
700 688 def clear(self):
701 self._map = dirstatemap()
689 self._map = dirstatemap(self._ui, self._opener, self._root)
702 690 self._nonnormalset = set()
703 691 self._otherparentset = set()
704 692 if "_dirs" in self.__dict__:
705 693 delattr(self, "_dirs")
706 694 self._pl = [nullid, nullid]
707 695 self._lastnormaltime = 0
708 696 self._updatedfiles.clear()
709 697 self._dirty = True
710 698
711 699 def rebuild(self, parent, allfiles, changedfiles=None):
712 700 if changedfiles is None:
713 701 # Rebuild entire dirstate
714 702 changedfiles = allfiles
715 703 lastnormaltime = self._lastnormaltime
716 704 self.clear()
717 705 self._lastnormaltime = lastnormaltime
718 706
719 707 if self._origpl is None:
720 708 self._origpl = self._pl
721 709 self._pl = (parent, nullid)
722 710 for f in changedfiles:
723 711 if f in allfiles:
724 712 self.normallookup(f)
725 713 else:
726 714 self.drop(f)
727 715
728 716 self._dirty = True
729 717
730 718 def identity(self):
731 719 '''Return identity of dirstate itself to detect changes in storage
732 720
733 721 If identity of previous dirstate is equal to this, writing
734 722 changes based on the former dirstate out can keep consistency.
735 723 '''
736 724 return self._identity
737 725
738 726 def write(self, tr):
739 727 if not self._dirty:
740 728 return
741 729
742 730 filename = self._filename
743 731 if tr:
744 732 # 'dirstate.write()' is not only for writing in-memory
745 733 # changes out, but also for dropping ambiguous timestamps.
746 734 # delayed writing would re-raise the "ambiguous timestamp issue".
747 735 # See also the wiki page below for detail:
748 736 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
749 737
750 738 # emulate dropping timestamp in 'parsers.pack_dirstate'
751 739 now = _getfsnow(self._opener)
752 740 dmap = self._map
753 741 for f in self._updatedfiles:
754 742 e = dmap.get(f)
755 743 if e is not None and e[0] == 'n' and e[3] == now:
756 744 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
757 745 self._nonnormalset.add(f)
758 746
759 747 # emulate that all 'dirstate.normal' results are written out
760 748 self._lastnormaltime = 0
761 749 self._updatedfiles.clear()
762 750
763 751 # delay writing in-memory changes out
764 752 tr.addfilegenerator('dirstate', (self._filename,),
765 753 self._writedirstate, location='plain')
766 754 return
767 755
768 756 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
769 757 self._writedirstate(st)
770 758
771 759 def addparentchangecallback(self, category, callback):
772 760 """add a callback to be called when the wd parents are changed
773 761
774 762 Callback will be called with the following arguments:
775 763 dirstate, (oldp1, oldp2), (newp1, newp2)
776 764
777 765 Category is a unique identifier to allow overwriting an old callback
778 766 with a newer callback.
779 767 """
780 768 self._plchangecallbacks[category] = callback
781 769
782 770 def _writedirstate(self, st):
783 771 # notify callbacks about parents change
784 772 if self._origpl is not None and self._origpl != self._pl:
785 773 for c, callback in sorted(self._plchangecallbacks.iteritems()):
786 774 callback(self, self._origpl, self._pl)
787 775 self._origpl = None
788 776 # use the modification time of the newly created temporary file as the
789 777 # filesystem's notion of 'now'
790 778 now = util.fstat(st).st_mtime & _rangemask
791 779
792 780 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
793 781 # the timestamp of each entry in dirstate, because of 'now > mtime'
794 782 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
795 783 if delaywrite > 0:
796 784 # do we have any files to delay for?
797 785 for f, e in self._map.iteritems():
798 786 if e[0] == 'n' and e[3] == now:
799 787 import time # to avoid useless import
800 788 # rather than sleep n seconds, sleep until the next
801 789 # multiple of n seconds
802 790 clock = time.time()
803 791 start = int(clock) - (int(clock) % delaywrite)
804 792 end = start + delaywrite
805 793 time.sleep(end - clock)
806 794 now = end # trust our estimate that the end is near now
807 795 break
808 796
809 797 st.write(parsers.pack_dirstate(self._map._map, self._map.copymap,
810 798 self._pl, now))
811 799 self._nonnormalset, self._otherparentset = self._map.nonnormalentries()
812 800 st.close()
813 801 self._lastnormaltime = 0
814 802 self._dirty = self._dirtypl = False
815 803
816 804 def _dirignore(self, f):
817 805 if f == '.':
818 806 return False
819 807 if self._ignore(f):
820 808 return True
821 809 for p in util.finddirs(f):
822 810 if self._ignore(p):
823 811 return True
824 812 return False
825 813
826 814 def _ignorefiles(self):
827 815 files = []
828 816 if os.path.exists(self._join('.hgignore')):
829 817 files.append(self._join('.hgignore'))
830 818 for name, path in self._ui.configitems("ui"):
831 819 if name == 'ignore' or name.startswith('ignore.'):
832 820 # we need to use os.path.join here rather than self._join
833 821 # because path is arbitrary and user-specified
834 822 files.append(os.path.join(self._rootdir, util.expandpath(path)))
835 823 return files
836 824
837 825 def _ignorefileandline(self, f):
838 826 files = collections.deque(self._ignorefiles())
839 827 visited = set()
840 828 while files:
841 829 i = files.popleft()
842 830 patterns = matchmod.readpatternfile(i, self._ui.warn,
843 831 sourceinfo=True)
844 832 for pattern, lineno, line in patterns:
845 833 kind, p = matchmod._patsplit(pattern, 'glob')
846 834 if kind == "subinclude":
847 835 if p not in visited:
848 836 files.append(p)
849 837 continue
850 838 m = matchmod.match(self._root, '', [], [pattern],
851 839 warn=self._ui.warn)
852 840 if m(f):
853 841 return (i, lineno, line)
854 842 visited.add(i)
855 843 return (None, -1, "")
856 844
857 845 def _walkexplicit(self, match, subrepos):
858 846 '''Get stat data about the files explicitly specified by match.
859 847
860 848 Return a triple (results, dirsfound, dirsnotfound).
861 849 - results is a mapping from filename to stat result. It also contains
862 850 listings mapping subrepos and .hg to None.
863 851 - dirsfound is a list of files found to be directories.
864 852 - dirsnotfound is a list of files that the dirstate thinks are
865 853 directories and that were not found.'''
866 854
867 855 def badtype(mode):
868 856 kind = _('unknown')
869 857 if stat.S_ISCHR(mode):
870 858 kind = _('character device')
871 859 elif stat.S_ISBLK(mode):
872 860 kind = _('block device')
873 861 elif stat.S_ISFIFO(mode):
874 862 kind = _('fifo')
875 863 elif stat.S_ISSOCK(mode):
876 864 kind = _('socket')
877 865 elif stat.S_ISDIR(mode):
878 866 kind = _('directory')
879 867 return _('unsupported file type (type is %s)') % kind
880 868
881 869 matchedir = match.explicitdir
882 870 badfn = match.bad
883 871 dmap = self._map
884 872 lstat = os.lstat
885 873 getkind = stat.S_IFMT
886 874 dirkind = stat.S_IFDIR
887 875 regkind = stat.S_IFREG
888 876 lnkkind = stat.S_IFLNK
889 877 join = self._join
890 878 dirsfound = []
891 879 foundadd = dirsfound.append
892 880 dirsnotfound = []
893 881 notfoundadd = dirsnotfound.append
894 882
895 883 if not match.isexact() and self._checkcase:
896 884 normalize = self._normalize
897 885 else:
898 886 normalize = None
899 887
900 888 files = sorted(match.files())
901 889 subrepos.sort()
902 890 i, j = 0, 0
903 891 while i < len(files) and j < len(subrepos):
904 892 subpath = subrepos[j] + "/"
905 893 if files[i] < subpath:
906 894 i += 1
907 895 continue
908 896 while i < len(files) and files[i].startswith(subpath):
909 897 del files[i]
910 898 j += 1
911 899
912 900 if not files or '.' in files:
913 901 files = ['.']
914 902 results = dict.fromkeys(subrepos)
915 903 results['.hg'] = None
916 904
917 905 alldirs = None
918 906 for ff in files:
919 907 # constructing the foldmap is expensive, so don't do it for the
920 908 # common case where files is ['.']
921 909 if normalize and ff != '.':
922 910 nf = normalize(ff, False, True)
923 911 else:
924 912 nf = ff
925 913 if nf in results:
926 914 continue
927 915
928 916 try:
929 917 st = lstat(join(nf))
930 918 kind = getkind(st.st_mode)
931 919 if kind == dirkind:
932 920 if nf in dmap:
933 921 # file replaced by dir on disk but still in dirstate
934 922 results[nf] = None
935 923 if matchedir:
936 924 matchedir(nf)
937 925 foundadd((nf, ff))
938 926 elif kind == regkind or kind == lnkkind:
939 927 results[nf] = st
940 928 else:
941 929 badfn(ff, badtype(kind))
942 930 if nf in dmap:
943 931 results[nf] = None
944 932 except OSError as inst: # nf not found on disk - it is dirstate only
945 933 if nf in dmap: # does it exactly match a missing file?
946 934 results[nf] = None
947 935 else: # does it match a missing directory?
948 936 if alldirs is None:
949 937 alldirs = util.dirs(dmap._map)
950 938 if nf in alldirs:
951 939 if matchedir:
952 940 matchedir(nf)
953 941 notfoundadd(nf)
954 942 else:
955 943 badfn(ff, encoding.strtolocal(inst.strerror))
956 944
957 945 # Case insensitive filesystems cannot rely on lstat() failing to detect
958 946 # a case-only rename. Prune the stat object for any file that does not
959 947 # match the case in the filesystem, if there are multiple files that
960 948 # normalize to the same path.
961 949 if match.isexact() and self._checkcase:
962 950 normed = {}
963 951
964 952 for f, st in results.iteritems():
965 953 if st is None:
966 954 continue
967 955
968 956 nc = util.normcase(f)
969 957 paths = normed.get(nc)
970 958
971 959 if paths is None:
972 960 paths = set()
973 961 normed[nc] = paths
974 962
975 963 paths.add(f)
976 964
977 965 for norm, paths in normed.iteritems():
978 966 if len(paths) > 1:
979 967 for path in paths:
980 968 folded = self._discoverpath(path, norm, True, None,
981 969 self._dirfoldmap)
982 970 if path != folded:
983 971 results[path] = None
984 972
985 973 return results, dirsfound, dirsnotfound
986 974
987 975 def walk(self, match, subrepos, unknown, ignored, full=True):
988 976 '''
989 977 Walk recursively through the directory tree, finding all files
990 978 matched by match.
991 979
992 980 If full is False, maybe skip some known-clean files.
993 981
994 982 Return a dict mapping filename to stat-like object (either
995 983 mercurial.osutil.stat instance or return value of os.stat()).
996 984
997 985 '''
998 986 # full is a flag that extensions that hook into walk can use -- this
999 987 # implementation doesn't use it at all. This satisfies the contract
1000 988 # because we only guarantee a "maybe".
1001 989
1002 990 if ignored:
1003 991 ignore = util.never
1004 992 dirignore = util.never
1005 993 elif unknown:
1006 994 ignore = self._ignore
1007 995 dirignore = self._dirignore
1008 996 else:
1009 997 # if not unknown and not ignored, drop dir recursion and step 2
1010 998 ignore = util.always
1011 999 dirignore = util.always
1012 1000
1013 1001 matchfn = match.matchfn
1014 1002 matchalways = match.always()
1015 1003 matchtdir = match.traversedir
1016 1004 dmap = self._map
1017 1005 listdir = util.listdir
1018 1006 lstat = os.lstat
1019 1007 dirkind = stat.S_IFDIR
1020 1008 regkind = stat.S_IFREG
1021 1009 lnkkind = stat.S_IFLNK
1022 1010 join = self._join
1023 1011
1024 1012 exact = skipstep3 = False
1025 1013 if match.isexact(): # match.exact
1026 1014 exact = True
1027 1015 dirignore = util.always # skip step 2
1028 1016 elif match.prefix(): # match.match, no patterns
1029 1017 skipstep3 = True
1030 1018
1031 1019 if not exact and self._checkcase:
1032 1020 normalize = self._normalize
1033 1021 normalizefile = self._normalizefile
1034 1022 skipstep3 = False
1035 1023 else:
1036 1024 normalize = self._normalize
1037 1025 normalizefile = None
1038 1026
1039 1027 # step 1: find all explicit files
1040 1028 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1041 1029
1042 1030 skipstep3 = skipstep3 and not (work or dirsnotfound)
1043 1031 work = [d for d in work if not dirignore(d[0])]
1044 1032
1045 1033 # step 2: visit subdirectories
1046 1034 def traverse(work, alreadynormed):
1047 1035 wadd = work.append
1048 1036 while work:
1049 1037 nd = work.pop()
1050 1038 if not match.visitdir(nd):
1051 1039 continue
1052 1040 skip = None
1053 1041 if nd == '.':
1054 1042 nd = ''
1055 1043 else:
1056 1044 skip = '.hg'
1057 1045 try:
1058 1046 entries = listdir(join(nd), stat=True, skip=skip)
1059 1047 except OSError as inst:
1060 1048 if inst.errno in (errno.EACCES, errno.ENOENT):
1061 1049 match.bad(self.pathto(nd),
1062 1050 encoding.strtolocal(inst.strerror))
1063 1051 continue
1064 1052 raise
1065 1053 for f, kind, st in entries:
1066 1054 if normalizefile:
1067 1055 # even though f might be a directory, we're only
1068 1056 # interested in comparing it to files currently in the
1069 1057 # dmap -- therefore normalizefile is enough
1070 1058 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1071 1059 True)
1072 1060 else:
1073 1061 nf = nd and (nd + "/" + f) or f
1074 1062 if nf not in results:
1075 1063 if kind == dirkind:
1076 1064 if not ignore(nf):
1077 1065 if matchtdir:
1078 1066 matchtdir(nf)
1079 1067 wadd(nf)
1080 1068 if nf in dmap and (matchalways or matchfn(nf)):
1081 1069 results[nf] = None
1082 1070 elif kind == regkind or kind == lnkkind:
1083 1071 if nf in dmap:
1084 1072 if matchalways or matchfn(nf):
1085 1073 results[nf] = st
1086 1074 elif ((matchalways or matchfn(nf))
1087 1075 and not ignore(nf)):
1088 1076 # unknown file -- normalize if necessary
1089 1077 if not alreadynormed:
1090 1078 nf = normalize(nf, False, True)
1091 1079 results[nf] = st
1092 1080 elif nf in dmap and (matchalways or matchfn(nf)):
1093 1081 results[nf] = None
1094 1082
1095 1083 for nd, d in work:
1096 1084 # alreadynormed means that processwork doesn't have to do any
1097 1085 # expensive directory normalization
1098 1086 alreadynormed = not normalize or nd == d
1099 1087 traverse([d], alreadynormed)
1100 1088
1101 1089 for s in subrepos:
1102 1090 del results[s]
1103 1091 del results['.hg']
1104 1092
1105 1093 # step 3: visit remaining files from dmap
1106 1094 if not skipstep3 and not exact:
1107 1095 # If a dmap file is not in results yet, it was either
1108 1096 # a) not matching matchfn b) ignored, c) missing, or d) under a
1109 1097 # symlink directory.
1110 1098 if not results and matchalways:
1111 1099 visit = [f for f in dmap]
1112 1100 else:
1113 1101 visit = [f for f in dmap if f not in results and matchfn(f)]
1114 1102 visit.sort()
1115 1103
1116 1104 if unknown:
1117 1105 # unknown == True means we walked all dirs under the roots
1118 1106 # that weren't ignored, and everything that matched was stat'ed
1119 1107 # and is already in results.
1120 1108 # The rest must thus be ignored or under a symlink.
1121 1109 audit_path = pathutil.pathauditor(self._root, cached=True)
1122 1110
1123 1111 for nf in iter(visit):
1124 1112 # If a stat for the same file was already added with a
1125 1113 # different case, don't add one for this, since that would
1126 1114 # make it appear as if the file exists under both names
1127 1115 # on disk.
1128 1116 if (normalizefile and
1129 1117 normalizefile(nf, True, True) in results):
1130 1118 results[nf] = None
1131 1119 # Report ignored items in the dmap as long as they are not
1132 1120 # under a symlink directory.
1133 1121 elif audit_path.check(nf):
1134 1122 try:
1135 1123 results[nf] = lstat(join(nf))
1136 1124 # file was just ignored, no links, and exists
1137 1125 except OSError:
1138 1126 # file doesn't exist
1139 1127 results[nf] = None
1140 1128 else:
1141 1129 # It's either missing or under a symlink directory
1142 1130 # which we in this case report as missing
1143 1131 results[nf] = None
1144 1132 else:
1145 1133 # We may not have walked the full directory tree above,
1146 1134 # so stat and check everything we missed.
1147 1135 iv = iter(visit)
1148 1136 for st in util.statfiles([join(i) for i in visit]):
1149 1137 results[next(iv)] = st
1150 1138 return results
1151 1139
1152 1140 def status(self, match, subrepos, ignored, clean, unknown):
1153 1141 '''Determine the status of the working copy relative to the
1154 1142 dirstate and return a pair of (unsure, status), where status is of type
1155 1143 scmutil.status and:
1156 1144
1157 1145 unsure:
1158 1146 files that might have been modified since the dirstate was
1159 1147 written, but need to be read to be sure (size is the same
1160 1148 but mtime differs)
1161 1149 status.modified:
1162 1150 files that have definitely been modified since the dirstate
1163 1151 was written (different size or mode)
1164 1152 status.clean:
1165 1153 files that have definitely not been modified since the
1166 1154 dirstate was written
1167 1155 '''
1168 1156 listignored, listclean, listunknown = ignored, clean, unknown
1169 1157 lookup, modified, added, unknown, ignored = [], [], [], [], []
1170 1158 removed, deleted, clean = [], [], []
1171 1159
1172 1160 dmap = self._map
1173 1161 ladd = lookup.append # aka "unsure"
1174 1162 madd = modified.append
1175 1163 aadd = added.append
1176 1164 uadd = unknown.append
1177 1165 iadd = ignored.append
1178 1166 radd = removed.append
1179 1167 dadd = deleted.append
1180 1168 cadd = clean.append
1181 1169 mexact = match.exact
1182 1170 dirignore = self._dirignore
1183 1171 checkexec = self._checkexec
1184 1172 copymap = self._map.copymap
1185 1173 lastnormaltime = self._lastnormaltime
1186 1174
1187 1175 # We need to do full walks when either
1188 1176 # - we're listing all clean files, or
1189 1177 # - match.traversedir does something, because match.traversedir should
1190 1178 # be called for every dir in the working dir
1191 1179 full = listclean or match.traversedir is not None
1192 1180 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1193 1181 full=full).iteritems():
1194 1182 if fn not in dmap:
1195 1183 if (listignored or mexact(fn)) and dirignore(fn):
1196 1184 if listignored:
1197 1185 iadd(fn)
1198 1186 else:
1199 1187 uadd(fn)
1200 1188 continue
1201 1189
1202 1190 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1203 1191 # written like that for performance reasons. dmap[fn] is not a
1204 1192 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1205 1193 # opcode has fast paths when the value to be unpacked is a tuple or
1206 1194 # a list, but falls back to creating a full-fledged iterator in
1207 1195 # general. That is much slower than simply accessing and storing the
1208 1196 # tuple members one by one.
1209 1197 t = dmap[fn]
1210 1198 state = t[0]
1211 1199 mode = t[1]
1212 1200 size = t[2]
1213 1201 time = t[3]
1214 1202
1215 1203 if not st and state in "nma":
1216 1204 dadd(fn)
1217 1205 elif state == 'n':
1218 1206 if (size >= 0 and
1219 1207 ((size != st.st_size and size != st.st_size & _rangemask)
1220 1208 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1221 1209 or size == -2 # other parent
1222 1210 or fn in copymap):
1223 1211 madd(fn)
1224 1212 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1225 1213 ladd(fn)
1226 1214 elif st.st_mtime == lastnormaltime:
1227 1215 # fn may have just been marked as normal and it may have
1228 1216 # changed in the same second without changing its size.
1229 1217 # This can happen if we quickly do multiple commits.
1230 1218 # Force lookup, so we don't miss such a racy file change.
1231 1219 ladd(fn)
1232 1220 elif listclean:
1233 1221 cadd(fn)
1234 1222 elif state == 'm':
1235 1223 madd(fn)
1236 1224 elif state == 'a':
1237 1225 aadd(fn)
1238 1226 elif state == 'r':
1239 1227 radd(fn)
1240 1228
1241 1229 return (lookup, scmutil.status(modified, added, removed, deleted,
1242 1230 unknown, ignored, clean))
1243 1231
1244 1232 def matches(self, match):
1245 1233 '''
1246 1234 return files in the dirstate (in whatever state) filtered by match
1247 1235 '''
1248 1236 dmap = self._map
1249 1237 if match.always():
1250 1238 return dmap.keys()
1251 1239 files = match.files()
1252 1240 if match.isexact():
1253 1241 # fast path -- filter the other way around, since typically files is
1254 1242 # much smaller than dmap
1255 1243 return [f for f in files if f in dmap]
1256 1244 if match.prefix() and all(fn in dmap for fn in files):
1257 1245 # fast path -- all the values are known to be files, so just return
1258 1246 # that
1259 1247 return list(files)
1260 1248 return [f for f in dmap if match(f)]
1261 1249
1262 1250 def _actualfilename(self, tr):
1263 1251 if tr:
1264 1252 return self._pendingfilename
1265 1253 else:
1266 1254 return self._filename
1267 1255
1268 1256 def savebackup(self, tr, backupname):
1269 1257 '''Save current dirstate into backup file'''
1270 1258 filename = self._actualfilename(tr)
1271 1259 assert backupname != filename
1272 1260
1273 1261 # use '_writedirstate' instead of 'write' to write changes certainly,
1274 1262 # because the latter omits writing out if transaction is running.
1275 1263 # output file will be used to create backup of dirstate at this point.
1276 1264 if self._dirty or not self._opener.exists(filename):
1277 1265 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1278 1266 checkambig=True))
1279 1267
1280 1268 if tr:
1281 1269 # ensure that subsequent tr.writepending returns True for
1282 1270 # changes written out above, even if dirstate is never
1283 1271 # changed after this
1284 1272 tr.addfilegenerator('dirstate', (self._filename,),
1285 1273 self._writedirstate, location='plain')
1286 1274
1287 1275 # ensure that pending file written above is unlinked at
1288 1276 # failure, even if tr.writepending isn't invoked until the
1289 1277 # end of this transaction
1290 1278 tr.registertmp(filename, location='plain')
1291 1279
1292 1280 self._opener.tryunlink(backupname)
1293 1281 # hardlink backup is okay because _writedirstate is always called
1294 1282 # with an "atomictemp=True" file.
1295 1283 util.copyfile(self._opener.join(filename),
1296 1284 self._opener.join(backupname), hardlink=True)
1297 1285
1298 1286 def restorebackup(self, tr, backupname):
1299 1287 '''Restore dirstate by backup file'''
1300 1288 # this "invalidate()" prevents "wlock.release()" from writing
1301 1289 # changes of dirstate out after restoring from backup file
1302 1290 self.invalidate()
1303 1291 filename = self._actualfilename(tr)
1304 1292 self._opener.rename(backupname, filename, checkambig=True)
1305 1293
1306 1294 def clearbackup(self, tr, backupname):
1307 1295 '''Clear backup file'''
1308 1296 self._opener.unlink(backupname)
1309 1297
1310 1298 class dirstatemap(object):
1311 def __init__(self):
1299 def __init__(self, ui, opener, root):
1300 self._ui = ui
1301 self._opener = opener
1302 self._root = root
1303 self._filename = 'dirstate'
1304
1312 1305 self._map = {}
1313 1306 self.copymap = {}
1314 1307
1308 # for consistent view between _pl() and _read() invocations
1309 self._pendingmode = None
1310
1315 1311 def iteritems(self):
1316 1312 return self._map.iteritems()
1317 1313
1318 1314 def __iter__(self):
1319 1315 return iter(self._map)
1320 1316
1321 1317 def get(self, key, default=None):
1322 1318 return self._map.get(key, default)
1323 1319
1324 1320 def __contains__(self, key):
1325 1321 return key in self._map
1326 1322
1327 1323 def __setitem__(self, key, value):
1328 1324 self._map[key] = value
1329 1325
1330 1326 def __getitem__(self, key):
1331 1327 return self._map[key]
1332 1328
1333 1329 def __delitem__(self, key):
1334 1330 del self._map[key]
1335 1331
1336 1332 def keys(self):
1337 1333 return self._map.keys()
1338 1334
1339 1335 def nonnormalentries(self):
1340 1336 '''Compute the nonnormal dirstate entries from the dmap'''
1341 1337 try:
1342 1338 return parsers.nonnormalotherparententries(self._map)
1343 1339 except AttributeError:
1344 1340 nonnorm = set()
1345 1341 otherparent = set()
1346 1342 for fname, e in self._map.iteritems():
1347 1343 if e[0] != 'n' or e[3] == -1:
1348 1344 nonnorm.add(fname)
1349 1345 if e[0] == 'n' and e[2] == -2:
1350 1346 otherparent.add(fname)
1351 1347 return nonnorm, otherparent
1352 1348
1353 1349 def filefoldmap(self):
1354 1350 """Returns a dictionary mapping normalized case paths to their
1355 1351 non-normalized versions.
1356 1352 """
1357 1353 try:
1358 1354 makefilefoldmap = parsers.make_file_foldmap
1359 1355 except AttributeError:
1360 1356 pass
1361 1357 else:
1362 1358 return makefilefoldmap(self._map, util.normcasespec,
1363 1359 util.normcasefallback)
1364 1360
1365 1361 f = {}
1366 1362 normcase = util.normcase
1367 1363 for name, s in self._map.iteritems():
1368 1364 if s[0] != 'r':
1369 1365 f[normcase(name)] = name
1370 1366 f['.'] = '.' # prevents useless util.fspath() invocation
1371 1367 return f
1372 1368
1373 1369 def dirs(self):
1374 1370 """Returns a set-like object containing all the directories in the
1375 1371 current dirstate.
1376 1372 """
1377 1373 return util.dirs(self._map, 'r')
1374
1375 def _opendirstatefile(self):
1376 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1377 if self._pendingmode is not None and self._pendingmode != mode:
1378 fp.close()
1379 raise error.Abort(_('working directory state may be '
1380 'changed parallelly'))
1381 self._pendingmode = mode
1382 return fp
1383
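For reference, the relocated method keeps honouring a pending transaction: it asks txnutil.trypending() for either the pending or the final dirstate file and remembers which mode it got in _pendingmode, aborting rather than mixing views. A minimal sketch of how the moved pieces are used together, assuming a ui, a vfs-style opener and a repo root such as dirstate itself passes in (variable names here are illustrative only):

    # mirrors what dirstate._read() does after this change
    dmap = dirstatemap(ui, opener, root)
    fp = dmap._opendirstatefile()  # may return 'dirstate.pending' during a transaction
    try:
        st = fp.read()
    finally:
        fp.close()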