dirstate: move _dirs to dirstatemap...
Durham Goode
r34336:af972241 default
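The substance of the change is small: dirstate._dirs stops building util.dirs() from the map's internal dict and instead delegates to a new dirstatemap.dirs() method, so the map type owns its own directory bookkeeping (compare the _dirs property and the new dirs() method at the end of the diff below). The following is a minimal, self-contained sketch of that delegation pattern; the fakedirs helper and the two *_sketch classes are illustrative stand-ins, not Mercurial's real util.dirs or dirstate classes.

# Illustrative stand-ins only: fakedirs mimics the set-like object returned by
# util.dirs(), and the *_sketch classes condense the delegation this commit adds.
import os

class fakedirs(object):
    """Set-like view of every ancestor directory of the given paths."""
    def __init__(self, paths):
        self._dirs = set()
        for p in paths:
            d = os.path.dirname(p)
            while d:
                self._dirs.add(d)
                d = os.path.dirname(d)

    def __contains__(self, d):
        return d in self._dirs

class dirstatemap_sketch(object):
    def __init__(self, entries):
        self._map = entries          # filename -> (state, mode, size, mtime)

    def dirs(self):
        # after this commit the map computes its own directories, the way
        # dirstatemap.dirs() returns util.dirs(self._map, 'r') in the real code
        return fakedirs(self._map)

class dirstate_sketch(object):
    def __init__(self, dmap):
        self._map = dmap

    @property
    def _dirs(self):
        # before: util.dirs(self._map._map, 'r')  -- peeking at the raw dict
        # after:  delegate to the map object
        return self._map.dirs()

dmap = dirstatemap_sketch({'a/b/c.txt': ('n', 0, 0, 0)})
ds = dirstate_sketch(dmap)
assert 'a' in ds._dirs and 'a/b' in ds._dirs

In the real calls the 'r' argument tells util.dirs to ignore entries in the removed state, a detail the stand-in skips.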
@@ -1,1377 +1,1383 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
71 71 # a UNC path pointing to a root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._dirtypl = False
75 75 self._lastnormaltime = 0
76 76 self._ui = ui
77 77 self._filecache = {}
78 78 self._parentwriters = 0
79 79 self._filename = 'dirstate'
80 80 self._pendingfilename = '%s.pending' % self._filename
81 81 self._plchangecallbacks = {}
82 82 self._origpl = None
83 83 self._updatedfiles = set()
84 84
85 85 # for consistent view between _pl() and _read() invocations
86 86 self._pendingmode = None
87 87
88 88 @contextlib.contextmanager
89 89 def parentchange(self):
90 90 '''Context manager for handling dirstate parents.
91 91
92 92 If an exception occurs in the scope of the context manager,
93 93 the incoherent dirstate won't be written when wlock is
94 94 released.
95 95 '''
96 96 self._parentwriters += 1
97 97 yield
98 98 # Typically we want the "undo" step of a context manager in a
99 99 # finally block so it happens even when an exception
100 100 # occurs. In this case, however, we only want to decrement
101 101 # parentwriters if the code in the with statement exits
102 102 # normally, so we don't have a try/finally here on purpose.
103 103 self._parentwriters -= 1
104 104
105 105 def beginparentchange(self):
106 106 '''Marks the beginning of a set of changes that involve changing
107 107 the dirstate parents. If there is an exception during this time,
108 108 the dirstate will not be written when the wlock is released. This
109 109 prevents writing an incoherent dirstate where the parent doesn't
110 110 match the contents.
111 111 '''
112 112 self._ui.deprecwarn('beginparentchange is obsoleted by the '
113 113 'parentchange context manager.', '4.3')
114 114 self._parentwriters += 1
115 115
116 116 def endparentchange(self):
117 117 '''Marks the end of a set of changes that involve changing the
118 118 dirstate parents. Once all parent changes have been marked done,
119 119 the wlock will be free to write the dirstate on release.
120 120 '''
121 121 self._ui.deprecwarn('endparentchange is obsoleted by the '
122 122 'parentchange context manager.', '4.3')
123 123 if self._parentwriters > 0:
124 124 self._parentwriters -= 1
125 125
126 126 def pendingparentchange(self):
127 127 '''Returns true if the dirstate is in the middle of a set of changes
128 128 that modify the dirstate parent.
129 129 '''
130 130 return self._parentwriters > 0
131 131
132 132 @propertycache
133 133 def _map(self):
134 134 '''Return the dirstate contents as a map from filename to
135 135 (state, mode, size, time).'''
136 136 self._read()
137 137 return self._map
138 138
139 139 @propertycache
140 140 def _copymap(self):
141 141 self._read()
142 142 return self._copymap
143 143
144 144 @propertycache
145 145 def _identity(self):
146 146 self._read()
147 147 return self._identity
148 148
149 149 @propertycache
150 150 def _nonnormalset(self):
151 151 nonnorm, otherparents = self._map.nonnormalentries()
152 152 self._otherparentset = otherparents
153 153 return nonnorm
154 154
155 155 @propertycache
156 156 def _otherparentset(self):
157 157 nonnorm, otherparents = self._map.nonnormalentries()
158 158 self._nonnormalset = nonnorm
159 159 return otherparents
160 160
161 161 @propertycache
162 162 def _filefoldmap(self):
163 163 return self._map.filefoldmap()
164 164
165 165 @propertycache
166 166 def _dirfoldmap(self):
167 167 f = {}
168 168 normcase = util.normcase
169 169 for name in self._dirs:
170 170 f[normcase(name)] = name
171 171 return f
172 172
173 173 @property
174 174 def _sparsematcher(self):
175 175 """The matcher for the sparse checkout.
176 176
177 177 The working directory may not include every file from a manifest. The
178 178 matcher obtained by this property will match a path if it is to be
179 179 included in the working directory.
180 180 """
181 181 # TODO there is potential to cache this property. For now, the matcher
182 182 # is resolved on every access. (But the called function does use a
183 183 # cache to keep the lookup fast.)
184 184 return self._sparsematchfn()
185 185
186 186 @repocache('branch')
187 187 def _branch(self):
188 188 try:
189 189 return self._opener.read("branch").strip() or "default"
190 190 except IOError as inst:
191 191 if inst.errno != errno.ENOENT:
192 192 raise
193 193 return "default"
194 194
195 195 @propertycache
196 196 def _pl(self):
197 197 try:
198 198 fp = self._opendirstatefile()
199 199 st = fp.read(40)
200 200 fp.close()
201 201 l = len(st)
202 202 if l == 40:
203 203 return st[:20], st[20:40]
204 204 elif l > 0 and l < 40:
205 205 raise error.Abort(_('working directory state appears damaged!'))
206 206 except IOError as err:
207 207 if err.errno != errno.ENOENT:
208 208 raise
209 209 return [nullid, nullid]
210 210
211 211 @propertycache
212 212 def _dirs(self):
213 return util.dirs(self._map._map, 'r')
213 return self._map.dirs()
214 214
215 215 def dirs(self):
216 216 return self._dirs
217 217
218 218 @rootcache('.hgignore')
219 219 def _ignore(self):
220 220 files = self._ignorefiles()
221 221 if not files:
222 222 return matchmod.never(self._root, '')
223 223
224 224 pats = ['include:%s' % f for f in files]
225 225 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
226 226
227 227 @propertycache
228 228 def _slash(self):
229 229 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
230 230
231 231 @propertycache
232 232 def _checklink(self):
233 233 return util.checklink(self._root)
234 234
235 235 @propertycache
236 236 def _checkexec(self):
237 237 return util.checkexec(self._root)
238 238
239 239 @propertycache
240 240 def _checkcase(self):
241 241 return not util.fscasesensitive(self._join('.hg'))
242 242
243 243 def _join(self, f):
244 244 # much faster than os.path.join()
245 245 # it's safe because f is always a relative path
246 246 return self._rootdir + f
247 247
248 248 def flagfunc(self, buildfallback):
249 249 if self._checklink and self._checkexec:
250 250 def f(x):
251 251 try:
252 252 st = os.lstat(self._join(x))
253 253 if util.statislink(st):
254 254 return 'l'
255 255 if util.statisexec(st):
256 256 return 'x'
257 257 except OSError:
258 258 pass
259 259 return ''
260 260 return f
261 261
262 262 fallback = buildfallback()
263 263 if self._checklink:
264 264 def f(x):
265 265 if os.path.islink(self._join(x)):
266 266 return 'l'
267 267 if 'x' in fallback(x):
268 268 return 'x'
269 269 return ''
270 270 return f
271 271 if self._checkexec:
272 272 def f(x):
273 273 if 'l' in fallback(x):
274 274 return 'l'
275 275 if util.isexec(self._join(x)):
276 276 return 'x'
277 277 return ''
278 278 return f
279 279 else:
280 280 return fallback
281 281
282 282 @propertycache
283 283 def _cwd(self):
284 284 # internal config: ui.forcecwd
285 285 forcecwd = self._ui.config('ui', 'forcecwd')
286 286 if forcecwd:
287 287 return forcecwd
288 288 return pycompat.getcwd()
289 289
290 290 def getcwd(self):
291 291 '''Return the path from which a canonical path is calculated.
292 292
293 293 This path should be used to resolve file patterns or to convert
294 294 canonical paths back to file paths for display. It shouldn't be
295 295 used to get real file paths. Use vfs functions instead.
296 296 '''
297 297 cwd = self._cwd
298 298 if cwd == self._root:
299 299 return ''
300 300 # self._root ends with a path separator if self._root is '/' or 'C:\'
301 301 rootsep = self._root
302 302 if not util.endswithsep(rootsep):
303 303 rootsep += pycompat.ossep
304 304 if cwd.startswith(rootsep):
305 305 return cwd[len(rootsep):]
306 306 else:
307 307 # we're outside the repo. return an absolute path.
308 308 return cwd
309 309
310 310 def pathto(self, f, cwd=None):
311 311 if cwd is None:
312 312 cwd = self.getcwd()
313 313 path = util.pathto(self._root, cwd, f)
314 314 if self._slash:
315 315 return util.pconvert(path)
316 316 return path
317 317
318 318 def __getitem__(self, key):
319 319 '''Return the current state of key (a filename) in the dirstate.
320 320
321 321 States are:
322 322 n normal
323 323 m needs merging
324 324 r marked for removal
325 325 a marked for addition
326 326 ? not tracked
327 327 '''
328 328 return self._map.get(key, ("?",))[0]
329 329
330 330 def __contains__(self, key):
331 331 return key in self._map
332 332
333 333 def __iter__(self):
334 334 return iter(sorted(self._map))
335 335
336 336 def items(self):
337 337 return self._map.iteritems()
338 338
339 339 iteritems = items
340 340
341 341 def parents(self):
342 342 return [self._validate(p) for p in self._pl]
343 343
344 344 def p1(self):
345 345 return self._validate(self._pl[0])
346 346
347 347 def p2(self):
348 348 return self._validate(self._pl[1])
349 349
350 350 def branch(self):
351 351 return encoding.tolocal(self._branch)
352 352
353 353 def setparents(self, p1, p2=nullid):
354 354 """Set dirstate parents to p1 and p2.
355 355
356 356 When moving from two parents to one, 'm' merged entries are
357 357 adjusted to normal and previous copy records are discarded and
358 358 returned by the call.
359 359
360 360 See localrepo.setparents()
361 361 """
362 362 if self._parentwriters == 0:
363 363 raise ValueError("cannot set dirstate parent without "
364 364 "calling dirstate.beginparentchange")
365 365
366 366 self._dirty = self._dirtypl = True
367 367 oldp2 = self._pl[1]
368 368 if self._origpl is None:
369 369 self._origpl = self._pl
370 370 self._pl = p1, p2
371 371 copies = {}
372 372 if oldp2 != nullid and p2 == nullid:
373 373 candidatefiles = self._nonnormalset.union(self._otherparentset)
374 374 for f in candidatefiles:
375 375 s = self._map.get(f)
376 376 if s is None:
377 377 continue
378 378
379 379 # Discard 'm' markers when moving away from a merge state
380 380 if s[0] == 'm':
381 381 source = self._copymap.get(f)
382 382 if source:
383 383 copies[f] = source
384 384 self.normallookup(f)
385 385 # Also fix up otherparent markers
386 386 elif s[0] == 'n' and s[2] == -2:
387 387 source = self._copymap.get(f)
388 388 if source:
389 389 copies[f] = source
390 390 self.add(f)
391 391 return copies
392 392
393 393 def setbranch(self, branch):
394 394 self._branch = encoding.fromlocal(branch)
395 395 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
396 396 try:
397 397 f.write(self._branch + '\n')
398 398 f.close()
399 399
400 400 # make sure filecache has the correct stat info for _branch after
401 401 # replacing the underlying file
402 402 ce = self._filecache['_branch']
403 403 if ce:
404 404 ce.refresh()
405 405 except: # re-raises
406 406 f.discard()
407 407 raise
408 408
409 409 def _opendirstatefile(self):
410 410 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
411 411 if self._pendingmode is not None and self._pendingmode != mode:
412 412 fp.close()
413 413 raise error.Abort(_('working directory state may be '
414 414 'changed parallelly'))
415 415 self._pendingmode = mode
416 416 return fp
417 417
418 418 def _read(self):
419 419 self._map = dirstatemap()
420 420
421 421 self._copymap = {}
422 422 # ignore HG_PENDING because identity is used only for writing
423 423 self._identity = util.filestat.frompath(
424 424 self._opener.join(self._filename))
425 425 try:
426 426 fp = self._opendirstatefile()
427 427 try:
428 428 st = fp.read()
429 429 finally:
430 430 fp.close()
431 431 except IOError as err:
432 432 if err.errno != errno.ENOENT:
433 433 raise
434 434 return
435 435 if not st:
436 436 return
437 437
438 438 if util.safehasattr(parsers, 'dict_new_presized'):
439 439 # Make an estimate of the number of files in the dirstate based on
440 440 # its size. From a linear regression on a set of real-world repos,
441 441 # all over 10,000 files, the size of a dirstate entry is 85
442 442 # bytes. The cost of resizing is significantly higher than the cost
443 443 # of filling in a larger presized dict, so subtract 20% from the
444 444 # size.
445 445 #
446 446 # This heuristic is imperfect in many ways, so in a future dirstate
447 447 # format update it makes sense to just record the number of entries
448 448 # on write.
449 449 self._map._map = parsers.dict_new_presized(len(st) / 71)
450 450
451 451 # Python's garbage collector triggers a GC each time a certain number
452 452 # of container objects (the number being defined by
453 453 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
454 454 # for each file in the dirstate. The C version then immediately marks
455 455 # them as not to be tracked by the collector. However, this has no
456 456 # effect on when GCs are triggered, only on what objects the GC looks
457 457 # into. This means that O(number of files) GCs are unavoidable.
458 458 # Depending on when in the process's lifetime the dirstate is parsed,
459 459 # this can get very expensive. As a workaround, disable GC while
460 460 # parsing the dirstate.
461 461 #
462 462 # (we cannot decorate the function directly since it is in a C module)
463 463 parse_dirstate = util.nogc(parsers.parse_dirstate)
464 464 p = parse_dirstate(self._map._map, self._copymap, st)
465 465 if not self._dirtypl:
466 466 self._pl = p
467 467
468 468 def invalidate(self):
469 469 '''Causes the next access to reread the dirstate.
470 470
471 471 This is different from localrepo.invalidatedirstate() because it always
472 472 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
473 473 check whether the dirstate has changed before rereading it.'''
474 474
475 475 for a in ("_map", "_copymap", "_identity",
476 476 "_filefoldmap", "_dirfoldmap", "_branch",
477 477 "_pl", "_dirs", "_ignore", "_nonnormalset",
478 478 "_otherparentset"):
479 479 if a in self.__dict__:
480 480 delattr(self, a)
481 481 self._lastnormaltime = 0
482 482 self._dirty = False
483 483 self._updatedfiles.clear()
484 484 self._parentwriters = 0
485 485 self._origpl = None
486 486
487 487 def copy(self, source, dest):
488 488 """Mark dest as a copy of source. Unmark dest if source is None."""
489 489 if source == dest:
490 490 return
491 491 self._dirty = True
492 492 if source is not None:
493 493 self._copymap[dest] = source
494 494 self._updatedfiles.add(source)
495 495 self._updatedfiles.add(dest)
496 496 elif self._copymap.pop(dest, None):
497 497 self._updatedfiles.add(dest)
498 498
499 499 def copied(self, file):
500 500 return self._copymap.get(file, None)
501 501
502 502 def copies(self):
503 503 return self._copymap
504 504
505 505 def _droppath(self, f):
506 506 if self[f] not in "?r" and "_dirs" in self.__dict__:
507 507 self._dirs.delpath(f)
508 508
509 509 if "_filefoldmap" in self.__dict__:
510 510 normed = util.normcase(f)
511 511 if normed in self._filefoldmap:
512 512 del self._filefoldmap[normed]
513 513
514 514 self._updatedfiles.add(f)
515 515
516 516 def _addpath(self, f, state, mode, size, mtime):
517 517 oldstate = self[f]
518 518 if state == 'a' or oldstate == 'r':
519 519 scmutil.checkfilename(f)
520 520 if f in self._dirs:
521 521 raise error.Abort(_('directory %r already in dirstate') % f)
522 522 # shadows
523 523 for d in util.finddirs(f):
524 524 if d in self._dirs:
525 525 break
526 526 entry = self._map.get(d)
527 527 if entry is not None and entry[0] != 'r':
528 528 raise error.Abort(
529 529 _('file %r in dirstate clashes with %r') % (d, f))
530 530 if oldstate in "?r" and "_dirs" in self.__dict__:
531 531 self._dirs.addpath(f)
532 532 self._dirty = True
533 533 self._updatedfiles.add(f)
534 534 self._map[f] = dirstatetuple(state, mode, size, mtime)
535 535 if state != 'n' or mtime == -1:
536 536 self._nonnormalset.add(f)
537 537 if size == -2:
538 538 self._otherparentset.add(f)
539 539
540 540 def normal(self, f):
541 541 '''Mark a file normal and clean.'''
542 542 s = os.lstat(self._join(f))
543 543 mtime = s.st_mtime
544 544 self._addpath(f, 'n', s.st_mode,
545 545 s.st_size & _rangemask, mtime & _rangemask)
546 546 self._copymap.pop(f, None)
547 547 if f in self._nonnormalset:
548 548 self._nonnormalset.remove(f)
549 549 if mtime > self._lastnormaltime:
550 550 # Remember the most recent modification timeslot for status(),
551 551 # to make sure we won't miss future size-preserving file content
552 552 # modifications that happen within the same timeslot.
553 553 self._lastnormaltime = mtime
554 554
555 555 def normallookup(self, f):
556 556 '''Mark a file normal, but possibly dirty.'''
557 557 if self._pl[1] != nullid:
558 558 # if there is a merge going on and the file was either
559 559 # in state 'm' (-1) or coming from other parent (-2) before
560 560 # being removed, restore that state.
561 561 entry = self._map.get(f)
562 562 if entry is not None:
563 563 if entry[0] == 'r' and entry[2] in (-1, -2):
564 564 source = self._copymap.get(f)
565 565 if entry[2] == -1:
566 566 self.merge(f)
567 567 elif entry[2] == -2:
568 568 self.otherparent(f)
569 569 if source:
570 570 self.copy(source, f)
571 571 return
572 572 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
573 573 return
574 574 self._addpath(f, 'n', 0, -1, -1)
575 575 self._copymap.pop(f, None)
576 576 if f in self._nonnormalset:
577 577 self._nonnormalset.remove(f)
578 578
579 579 def otherparent(self, f):
580 580 '''Mark as coming from the other parent, always dirty.'''
581 581 if self._pl[1] == nullid:
582 582 raise error.Abort(_("setting %r to other parent "
583 583 "only allowed in merges") % f)
584 584 if f in self and self[f] == 'n':
585 585 # merge-like
586 586 self._addpath(f, 'm', 0, -2, -1)
587 587 else:
588 588 # add-like
589 589 self._addpath(f, 'n', 0, -2, -1)
590 590 self._copymap.pop(f, None)
591 591
592 592 def add(self, f):
593 593 '''Mark a file added.'''
594 594 self._addpath(f, 'a', 0, -1, -1)
595 595 self._copymap.pop(f, None)
596 596
597 597 def remove(self, f):
598 598 '''Mark a file removed.'''
599 599 self._dirty = True
600 600 self._droppath(f)
601 601 size = 0
602 602 if self._pl[1] != nullid:
603 603 entry = self._map.get(f)
604 604 if entry is not None:
605 605 # backup the previous state
606 606 if entry[0] == 'm': # merge
607 607 size = -1
608 608 elif entry[0] == 'n' and entry[2] == -2: # other parent
609 609 size = -2
610 610 self._otherparentset.add(f)
611 611 self._map[f] = dirstatetuple('r', 0, size, 0)
612 612 self._nonnormalset.add(f)
613 613 if size == 0:
614 614 self._copymap.pop(f, None)
615 615
616 616 def merge(self, f):
617 617 '''Mark a file merged.'''
618 618 if self._pl[1] == nullid:
619 619 return self.normallookup(f)
620 620 return self.otherparent(f)
621 621
622 622 def drop(self, f):
623 623 '''Drop a file from the dirstate'''
624 624 if f in self._map:
625 625 self._dirty = True
626 626 self._droppath(f)
627 627 del self._map[f]
628 628 if f in self._nonnormalset:
629 629 self._nonnormalset.remove(f)
630 630 self._copymap.pop(f, None)
631 631
632 632 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
633 633 if exists is None:
634 634 exists = os.path.lexists(os.path.join(self._root, path))
635 635 if not exists:
636 636 # Maybe a path component exists
637 637 if not ignoremissing and '/' in path:
638 638 d, f = path.rsplit('/', 1)
639 639 d = self._normalize(d, False, ignoremissing, None)
640 640 folded = d + "/" + f
641 641 else:
642 642 # No path components, preserve original case
643 643 folded = path
644 644 else:
645 645 # recursively normalize leading directory components
646 646 # against dirstate
647 647 if '/' in normed:
648 648 d, f = normed.rsplit('/', 1)
649 649 d = self._normalize(d, False, ignoremissing, True)
650 650 r = self._root + "/" + d
651 651 folded = d + "/" + util.fspath(f, r)
652 652 else:
653 653 folded = util.fspath(normed, self._root)
654 654 storemap[normed] = folded
655 655
656 656 return folded
657 657
658 658 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
659 659 normed = util.normcase(path)
660 660 folded = self._filefoldmap.get(normed, None)
661 661 if folded is None:
662 662 if isknown:
663 663 folded = path
664 664 else:
665 665 folded = self._discoverpath(path, normed, ignoremissing, exists,
666 666 self._filefoldmap)
667 667 return folded
668 668
669 669 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
670 670 normed = util.normcase(path)
671 671 folded = self._filefoldmap.get(normed, None)
672 672 if folded is None:
673 673 folded = self._dirfoldmap.get(normed, None)
674 674 if folded is None:
675 675 if isknown:
676 676 folded = path
677 677 else:
678 678 # store discovered result in dirfoldmap so that future
679 679 # normalizefile calls don't start matching directories
680 680 folded = self._discoverpath(path, normed, ignoremissing, exists,
681 681 self._dirfoldmap)
682 682 return folded
683 683
684 684 def normalize(self, path, isknown=False, ignoremissing=False):
685 685 '''
686 686 normalize the case of a pathname when on a casefolding filesystem
687 687
688 688 isknown specifies whether the filename came from walking the
689 689 disk, to avoid extra filesystem access.
690 690
691 691 If ignoremissing is True, missing paths are returned
692 692 unchanged. Otherwise, we try harder to normalize possibly
693 693 existing path components.
694 694
695 695 The normalized case is determined based on the following precedence:
696 696
697 697 - version of name already stored in the dirstate
698 698 - version of name stored on disk
699 699 - version provided via command arguments
700 700 '''
701 701
702 702 if self._checkcase:
703 703 return self._normalize(path, isknown, ignoremissing)
704 704 return path
705 705
706 706 def clear(self):
707 707 self._map = dirstatemap()
708 708 self._nonnormalset = set()
709 709 self._otherparentset = set()
710 710 if "_dirs" in self.__dict__:
711 711 delattr(self, "_dirs")
712 712 self._copymap = {}
713 713 self._pl = [nullid, nullid]
714 714 self._lastnormaltime = 0
715 715 self._updatedfiles.clear()
716 716 self._dirty = True
717 717
718 718 def rebuild(self, parent, allfiles, changedfiles=None):
719 719 if changedfiles is None:
720 720 # Rebuild entire dirstate
721 721 changedfiles = allfiles
722 722 lastnormaltime = self._lastnormaltime
723 723 self.clear()
724 724 self._lastnormaltime = lastnormaltime
725 725
726 726 if self._origpl is None:
727 727 self._origpl = self._pl
728 728 self._pl = (parent, nullid)
729 729 for f in changedfiles:
730 730 if f in allfiles:
731 731 self.normallookup(f)
732 732 else:
733 733 self.drop(f)
734 734
735 735 self._dirty = True
736 736
737 737 def identity(self):
738 738 '''Return the identity of the dirstate itself to detect changes in storage
739 739
740 740 If the identity of the previous dirstate is equal to this one, writing
741 741 out changes based on the former dirstate can keep consistency.
742 742 '''
743 743 return self._identity
744 744
745 745 def write(self, tr):
746 746 if not self._dirty:
747 747 return
748 748
749 749 filename = self._filename
750 750 if tr:
751 751 # 'dirstate.write()' is not only for writing in-memory
752 752 # changes out, but also for dropping ambiguous timestamps.
753 753 # Delayed writing re-raises the "ambiguous timestamp issue".
754 754 # See also the wiki page below for detail:
755 755 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
756 756
757 757 # emulate dropping timestamp in 'parsers.pack_dirstate'
758 758 now = _getfsnow(self._opener)
759 759 dmap = self._map
760 760 for f in self._updatedfiles:
761 761 e = dmap.get(f)
762 762 if e is not None and e[0] == 'n' and e[3] == now:
763 763 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
764 764 self._nonnormalset.add(f)
765 765
766 766 # emulate that all 'dirstate.normal' results are written out
767 767 self._lastnormaltime = 0
768 768 self._updatedfiles.clear()
769 769
770 770 # delay writing in-memory changes out
771 771 tr.addfilegenerator('dirstate', (self._filename,),
772 772 self._writedirstate, location='plain')
773 773 return
774 774
775 775 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
776 776 self._writedirstate(st)
777 777
778 778 def addparentchangecallback(self, category, callback):
779 779 """add a callback to be called when the wd parents are changed
780 780
781 781 Callback will be called with the following arguments:
782 782 dirstate, (oldp1, oldp2), (newp1, newp2)
783 783
784 784 Category is a unique identifier to allow overwriting an old callback
785 785 with a newer callback.
786 786 """
787 787 self._plchangecallbacks[category] = callback
788 788
789 789 def _writedirstate(self, st):
790 790 # notify callbacks about parents change
791 791 if self._origpl is not None and self._origpl != self._pl:
792 792 for c, callback in sorted(self._plchangecallbacks.iteritems()):
793 793 callback(self, self._origpl, self._pl)
794 794 self._origpl = None
795 795 # use the modification time of the newly created temporary file as the
796 796 # filesystem's notion of 'now'
797 797 now = util.fstat(st).st_mtime & _rangemask
798 798
799 799 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
800 800 # the timestamp of each entry in the dirstate, because of 'now > mtime'
801 801 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
802 802 if delaywrite > 0:
803 803 # do we have any files to delay for?
804 804 for f, e in self._map.iteritems():
805 805 if e[0] == 'n' and e[3] == now:
806 806 import time # to avoid useless import
807 807 # rather than sleep n seconds, sleep until the next
808 808 # multiple of n seconds
809 809 clock = time.time()
810 810 start = int(clock) - (int(clock) % delaywrite)
811 811 end = start + delaywrite
812 812 time.sleep(end - clock)
813 813 now = end # trust our estimate that the end is near now
814 814 break
815 815
816 816 st.write(parsers.pack_dirstate(self._map._map, self._copymap, self._pl,
817 817 now))
818 818 self._nonnormalset, self._otherparentset = self._map.nonnormalentries()
819 819 st.close()
820 820 self._lastnormaltime = 0
821 821 self._dirty = self._dirtypl = False
822 822
823 823 def _dirignore(self, f):
824 824 if f == '.':
825 825 return False
826 826 if self._ignore(f):
827 827 return True
828 828 for p in util.finddirs(f):
829 829 if self._ignore(p):
830 830 return True
831 831 return False
832 832
833 833 def _ignorefiles(self):
834 834 files = []
835 835 if os.path.exists(self._join('.hgignore')):
836 836 files.append(self._join('.hgignore'))
837 837 for name, path in self._ui.configitems("ui"):
838 838 if name == 'ignore' or name.startswith('ignore.'):
839 839 # we need to use os.path.join here rather than self._join
840 840 # because path is arbitrary and user-specified
841 841 files.append(os.path.join(self._rootdir, util.expandpath(path)))
842 842 return files
843 843
844 844 def _ignorefileandline(self, f):
845 845 files = collections.deque(self._ignorefiles())
846 846 visited = set()
847 847 while files:
848 848 i = files.popleft()
849 849 patterns = matchmod.readpatternfile(i, self._ui.warn,
850 850 sourceinfo=True)
851 851 for pattern, lineno, line in patterns:
852 852 kind, p = matchmod._patsplit(pattern, 'glob')
853 853 if kind == "subinclude":
854 854 if p not in visited:
855 855 files.append(p)
856 856 continue
857 857 m = matchmod.match(self._root, '', [], [pattern],
858 858 warn=self._ui.warn)
859 859 if m(f):
860 860 return (i, lineno, line)
861 861 visited.add(i)
862 862 return (None, -1, "")
863 863
864 864 def _walkexplicit(self, match, subrepos):
865 865 '''Get stat data about the files explicitly specified by match.
866 866
867 867 Return a triple (results, dirsfound, dirsnotfound).
868 868 - results is a mapping from filename to stat result. It also contains
869 869 listings mapping subrepos and .hg to None.
870 870 - dirsfound is a list of files found to be directories.
871 871 - dirsnotfound is a list of files that the dirstate thinks are
872 872 directories and that were not found.'''
873 873
874 874 def badtype(mode):
875 875 kind = _('unknown')
876 876 if stat.S_ISCHR(mode):
877 877 kind = _('character device')
878 878 elif stat.S_ISBLK(mode):
879 879 kind = _('block device')
880 880 elif stat.S_ISFIFO(mode):
881 881 kind = _('fifo')
882 882 elif stat.S_ISSOCK(mode):
883 883 kind = _('socket')
884 884 elif stat.S_ISDIR(mode):
885 885 kind = _('directory')
886 886 return _('unsupported file type (type is %s)') % kind
887 887
888 888 matchedir = match.explicitdir
889 889 badfn = match.bad
890 890 dmap = self._map
891 891 lstat = os.lstat
892 892 getkind = stat.S_IFMT
893 893 dirkind = stat.S_IFDIR
894 894 regkind = stat.S_IFREG
895 895 lnkkind = stat.S_IFLNK
896 896 join = self._join
897 897 dirsfound = []
898 898 foundadd = dirsfound.append
899 899 dirsnotfound = []
900 900 notfoundadd = dirsnotfound.append
901 901
902 902 if not match.isexact() and self._checkcase:
903 903 normalize = self._normalize
904 904 else:
905 905 normalize = None
906 906
907 907 files = sorted(match.files())
908 908 subrepos.sort()
909 909 i, j = 0, 0
910 910 while i < len(files) and j < len(subrepos):
911 911 subpath = subrepos[j] + "/"
912 912 if files[i] < subpath:
913 913 i += 1
914 914 continue
915 915 while i < len(files) and files[i].startswith(subpath):
916 916 del files[i]
917 917 j += 1
918 918
919 919 if not files or '.' in files:
920 920 files = ['.']
921 921 results = dict.fromkeys(subrepos)
922 922 results['.hg'] = None
923 923
924 924 alldirs = None
925 925 for ff in files:
926 926 # constructing the foldmap is expensive, so don't do it for the
927 927 # common case where files is ['.']
928 928 if normalize and ff != '.':
929 929 nf = normalize(ff, False, True)
930 930 else:
931 931 nf = ff
932 932 if nf in results:
933 933 continue
934 934
935 935 try:
936 936 st = lstat(join(nf))
937 937 kind = getkind(st.st_mode)
938 938 if kind == dirkind:
939 939 if nf in dmap:
940 940 # file replaced by dir on disk but still in dirstate
941 941 results[nf] = None
942 942 if matchedir:
943 943 matchedir(nf)
944 944 foundadd((nf, ff))
945 945 elif kind == regkind or kind == lnkkind:
946 946 results[nf] = st
947 947 else:
948 948 badfn(ff, badtype(kind))
949 949 if nf in dmap:
950 950 results[nf] = None
951 951 except OSError as inst: # nf not found on disk - it is dirstate only
952 952 if nf in dmap: # does it exactly match a missing file?
953 953 results[nf] = None
954 954 else: # does it match a missing directory?
955 955 if alldirs is None:
956 956 alldirs = util.dirs(dmap._map)
957 957 if nf in alldirs:
958 958 if matchedir:
959 959 matchedir(nf)
960 960 notfoundadd(nf)
961 961 else:
962 962 badfn(ff, encoding.strtolocal(inst.strerror))
963 963
964 964 # Case insensitive filesystems cannot rely on lstat() failing to detect
965 965 # a case-only rename. Prune the stat object for any file that does not
966 966 # match the case in the filesystem, if there are multiple files that
967 967 # normalize to the same path.
968 968 if match.isexact() and self._checkcase:
969 969 normed = {}
970 970
971 971 for f, st in results.iteritems():
972 972 if st is None:
973 973 continue
974 974
975 975 nc = util.normcase(f)
976 976 paths = normed.get(nc)
977 977
978 978 if paths is None:
979 979 paths = set()
980 980 normed[nc] = paths
981 981
982 982 paths.add(f)
983 983
984 984 for norm, paths in normed.iteritems():
985 985 if len(paths) > 1:
986 986 for path in paths:
987 987 folded = self._discoverpath(path, norm, True, None,
988 988 self._dirfoldmap)
989 989 if path != folded:
990 990 results[path] = None
991 991
992 992 return results, dirsfound, dirsnotfound
993 993
994 994 def walk(self, match, subrepos, unknown, ignored, full=True):
995 995 '''
996 996 Walk recursively through the directory tree, finding all files
997 997 matched by match.
998 998
999 999 If full is False, maybe skip some known-clean files.
1000 1000
1001 1001 Return a dict mapping filename to stat-like object (either
1002 1002 mercurial.osutil.stat instance or return value of os.stat()).
1003 1003
1004 1004 '''
1005 1005 # full is a flag that extensions that hook into walk can use -- this
1006 1006 # implementation doesn't use it at all. This satisfies the contract
1007 1007 # because we only guarantee a "maybe".
1008 1008
1009 1009 if ignored:
1010 1010 ignore = util.never
1011 1011 dirignore = util.never
1012 1012 elif unknown:
1013 1013 ignore = self._ignore
1014 1014 dirignore = self._dirignore
1015 1015 else:
1016 1016 # if not unknown and not ignored, drop dir recursion and step 2
1017 1017 ignore = util.always
1018 1018 dirignore = util.always
1019 1019
1020 1020 matchfn = match.matchfn
1021 1021 matchalways = match.always()
1022 1022 matchtdir = match.traversedir
1023 1023 dmap = self._map
1024 1024 listdir = util.listdir
1025 1025 lstat = os.lstat
1026 1026 dirkind = stat.S_IFDIR
1027 1027 regkind = stat.S_IFREG
1028 1028 lnkkind = stat.S_IFLNK
1029 1029 join = self._join
1030 1030
1031 1031 exact = skipstep3 = False
1032 1032 if match.isexact(): # match.exact
1033 1033 exact = True
1034 1034 dirignore = util.always # skip step 2
1035 1035 elif match.prefix(): # match.match, no patterns
1036 1036 skipstep3 = True
1037 1037
1038 1038 if not exact and self._checkcase:
1039 1039 normalize = self._normalize
1040 1040 normalizefile = self._normalizefile
1041 1041 skipstep3 = False
1042 1042 else:
1043 1043 normalize = self._normalize
1044 1044 normalizefile = None
1045 1045
1046 1046 # step 1: find all explicit files
1047 1047 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1048 1048
1049 1049 skipstep3 = skipstep3 and not (work or dirsnotfound)
1050 1050 work = [d for d in work if not dirignore(d[0])]
1051 1051
1052 1052 # step 2: visit subdirectories
1053 1053 def traverse(work, alreadynormed):
1054 1054 wadd = work.append
1055 1055 while work:
1056 1056 nd = work.pop()
1057 1057 if not match.visitdir(nd):
1058 1058 continue
1059 1059 skip = None
1060 1060 if nd == '.':
1061 1061 nd = ''
1062 1062 else:
1063 1063 skip = '.hg'
1064 1064 try:
1065 1065 entries = listdir(join(nd), stat=True, skip=skip)
1066 1066 except OSError as inst:
1067 1067 if inst.errno in (errno.EACCES, errno.ENOENT):
1068 1068 match.bad(self.pathto(nd),
1069 1069 encoding.strtolocal(inst.strerror))
1070 1070 continue
1071 1071 raise
1072 1072 for f, kind, st in entries:
1073 1073 if normalizefile:
1074 1074 # even though f might be a directory, we're only
1075 1075 # interested in comparing it to files currently in the
1076 1076 # dmap -- therefore normalizefile is enough
1077 1077 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1078 1078 True)
1079 1079 else:
1080 1080 nf = nd and (nd + "/" + f) or f
1081 1081 if nf not in results:
1082 1082 if kind == dirkind:
1083 1083 if not ignore(nf):
1084 1084 if matchtdir:
1085 1085 matchtdir(nf)
1086 1086 wadd(nf)
1087 1087 if nf in dmap and (matchalways or matchfn(nf)):
1088 1088 results[nf] = None
1089 1089 elif kind == regkind or kind == lnkkind:
1090 1090 if nf in dmap:
1091 1091 if matchalways or matchfn(nf):
1092 1092 results[nf] = st
1093 1093 elif ((matchalways or matchfn(nf))
1094 1094 and not ignore(nf)):
1095 1095 # unknown file -- normalize if necessary
1096 1096 if not alreadynormed:
1097 1097 nf = normalize(nf, False, True)
1098 1098 results[nf] = st
1099 1099 elif nf in dmap and (matchalways or matchfn(nf)):
1100 1100 results[nf] = None
1101 1101
1102 1102 for nd, d in work:
1103 1103 # alreadynormed means that traverse() doesn't have to do any
1104 1104 # expensive directory normalization
1105 1105 alreadynormed = not normalize or nd == d
1106 1106 traverse([d], alreadynormed)
1107 1107
1108 1108 for s in subrepos:
1109 1109 del results[s]
1110 1110 del results['.hg']
1111 1111
1112 1112 # step 3: visit remaining files from dmap
1113 1113 if not skipstep3 and not exact:
1114 1114 # If a dmap file is not in results yet, it was either
1115 1115 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1116 1116 # symlink directory.
1117 1117 if not results and matchalways:
1118 1118 visit = [f for f in dmap]
1119 1119 else:
1120 1120 visit = [f for f in dmap if f not in results and matchfn(f)]
1121 1121 visit.sort()
1122 1122
1123 1123 if unknown:
1124 1124 # unknown == True means we walked all dirs under the roots
1125 1125 # that weren't ignored, and everything that matched was stat'ed
1126 1126 # and is already in results.
1127 1127 # The rest must thus be ignored or under a symlink.
1128 1128 audit_path = pathutil.pathauditor(self._root, cached=True)
1129 1129
1130 1130 for nf in iter(visit):
1131 1131 # If a stat for the same file was already added with a
1132 1132 # different case, don't add one for this, since that would
1133 1133 # make it appear as if the file exists under both names
1134 1134 # on disk.
1135 1135 if (normalizefile and
1136 1136 normalizefile(nf, True, True) in results):
1137 1137 results[nf] = None
1138 1138 # Report ignored items in the dmap as long as they are not
1139 1139 # under a symlink directory.
1140 1140 elif audit_path.check(nf):
1141 1141 try:
1142 1142 results[nf] = lstat(join(nf))
1143 1143 # file was just ignored, no links, and exists
1144 1144 except OSError:
1145 1145 # file doesn't exist
1146 1146 results[nf] = None
1147 1147 else:
1148 1148 # It's either missing or under a symlink directory,
1149 1149 # which we report as missing in this case
1150 1150 results[nf] = None
1151 1151 else:
1152 1152 # We may not have walked the full directory tree above,
1153 1153 # so stat and check everything we missed.
1154 1154 iv = iter(visit)
1155 1155 for st in util.statfiles([join(i) for i in visit]):
1156 1156 results[next(iv)] = st
1157 1157 return results
1158 1158
1159 1159 def status(self, match, subrepos, ignored, clean, unknown):
1160 1160 '''Determine the status of the working copy relative to the
1161 1161 dirstate and return a pair of (unsure, status), where status is of type
1162 1162 scmutil.status and:
1163 1163
1164 1164 unsure:
1165 1165 files that might have been modified since the dirstate was
1166 1166 written, but need to be read to be sure (size is the same
1167 1167 but mtime differs)
1168 1168 status.modified:
1169 1169 files that have definitely been modified since the dirstate
1170 1170 was written (different size or mode)
1171 1171 status.clean:
1172 1172 files that have definitely not been modified since the
1173 1173 dirstate was written
1174 1174 '''
1175 1175 listignored, listclean, listunknown = ignored, clean, unknown
1176 1176 lookup, modified, added, unknown, ignored = [], [], [], [], []
1177 1177 removed, deleted, clean = [], [], []
1178 1178
1179 1179 dmap = self._map
1180 1180 ladd = lookup.append # aka "unsure"
1181 1181 madd = modified.append
1182 1182 aadd = added.append
1183 1183 uadd = unknown.append
1184 1184 iadd = ignored.append
1185 1185 radd = removed.append
1186 1186 dadd = deleted.append
1187 1187 cadd = clean.append
1188 1188 mexact = match.exact
1189 1189 dirignore = self._dirignore
1190 1190 checkexec = self._checkexec
1191 1191 copymap = self._copymap
1192 1192 lastnormaltime = self._lastnormaltime
1193 1193
1194 1194 # We need to do full walks when either
1195 1195 # - we're listing all clean files, or
1196 1196 # - match.traversedir does something, because match.traversedir should
1197 1197 # be called for every dir in the working dir
1198 1198 full = listclean or match.traversedir is not None
1199 1199 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1200 1200 full=full).iteritems():
1201 1201 if fn not in dmap:
1202 1202 if (listignored or mexact(fn)) and dirignore(fn):
1203 1203 if listignored:
1204 1204 iadd(fn)
1205 1205 else:
1206 1206 uadd(fn)
1207 1207 continue
1208 1208
1209 1209 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1210 1210 # written like that for performance reasons. dmap[fn] is not a
1211 1211 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1212 1212 # opcode has fast paths when the value to be unpacked is a tuple or
1213 1213 # a list, but falls back to creating a full-fledged iterator in
1214 1214 # general. That is much slower than simply accessing and storing the
1215 1215 # tuple members one by one.
1216 1216 t = dmap[fn]
1217 1217 state = t[0]
1218 1218 mode = t[1]
1219 1219 size = t[2]
1220 1220 time = t[3]
1221 1221
1222 1222 if not st and state in "nma":
1223 1223 dadd(fn)
1224 1224 elif state == 'n':
1225 1225 if (size >= 0 and
1226 1226 ((size != st.st_size and size != st.st_size & _rangemask)
1227 1227 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1228 1228 or size == -2 # other parent
1229 1229 or fn in copymap):
1230 1230 madd(fn)
1231 1231 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1232 1232 ladd(fn)
1233 1233 elif st.st_mtime == lastnormaltime:
1234 1234 # fn may have just been marked as normal and it may have
1235 1235 # changed in the same second without changing its size.
1236 1236 # This can happen if we quickly do multiple commits.
1237 1237 # Force lookup, so we don't miss such a racy file change.
1238 1238 ladd(fn)
1239 1239 elif listclean:
1240 1240 cadd(fn)
1241 1241 elif state == 'm':
1242 1242 madd(fn)
1243 1243 elif state == 'a':
1244 1244 aadd(fn)
1245 1245 elif state == 'r':
1246 1246 radd(fn)
1247 1247
1248 1248 return (lookup, scmutil.status(modified, added, removed, deleted,
1249 1249 unknown, ignored, clean))
1250 1250
1251 1251 def matches(self, match):
1252 1252 '''
1253 1253 return files in the dirstate (in whatever state) filtered by match
1254 1254 '''
1255 1255 dmap = self._map
1256 1256 if match.always():
1257 1257 return dmap.keys()
1258 1258 files = match.files()
1259 1259 if match.isexact():
1260 1260 # fast path -- filter the other way around, since typically files is
1261 1261 # much smaller than dmap
1262 1262 return [f for f in files if f in dmap]
1263 1263 if match.prefix() and all(fn in dmap for fn in files):
1264 1264 # fast path -- all the values are known to be files, so just return
1265 1265 # that
1266 1266 return list(files)
1267 1267 return [f for f in dmap if match(f)]
1268 1268
1269 1269 def _actualfilename(self, tr):
1270 1270 if tr:
1271 1271 return self._pendingfilename
1272 1272 else:
1273 1273 return self._filename
1274 1274
1275 1275 def savebackup(self, tr, backupname):
1276 1276 '''Save current dirstate into backup file'''
1277 1277 filename = self._actualfilename(tr)
1278 1278 assert backupname != filename
1279 1279
1280 1280 # use '_writedirstate' instead of 'write' so that changes are written out
1281 1281 # unconditionally, because the latter omits writing while a transaction is running.
1282 1282 # The output file will be used to create a backup of the dirstate at this point.
1283 1283 if self._dirty or not self._opener.exists(filename):
1284 1284 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1285 1285 checkambig=True))
1286 1286
1287 1287 if tr:
1288 1288 # ensure that subsequent tr.writepending returns True for
1289 1289 # changes written out above, even if dirstate is never
1290 1290 # changed after this
1291 1291 tr.addfilegenerator('dirstate', (self._filename,),
1292 1292 self._writedirstate, location='plain')
1293 1293
1294 1294 # ensure that the pending file written above is unlinked on
1295 1295 # failure, even if tr.writepending isn't invoked until the
1296 1296 # end of this transaction
1297 1297 tr.registertmp(filename, location='plain')
1298 1298
1299 1299 self._opener.tryunlink(backupname)
1300 1300 # hardlink backup is okay because _writedirstate is always called
1301 1301 # with an "atomictemp=True" file.
1302 1302 util.copyfile(self._opener.join(filename),
1303 1303 self._opener.join(backupname), hardlink=True)
1304 1304
1305 1305 def restorebackup(self, tr, backupname):
1306 1306 '''Restore dirstate by backup file'''
1307 1307 # this "invalidate()" prevents "wlock.release()" from writing
1308 1308 # changes of dirstate out after restoring from backup file
1309 1309 self.invalidate()
1310 1310 filename = self._actualfilename(tr)
1311 1311 self._opener.rename(backupname, filename, checkambig=True)
1312 1312
1313 1313 def clearbackup(self, tr, backupname):
1314 1314 '''Clear backup file'''
1315 1315 self._opener.unlink(backupname)
1316 1316
1317 1317 class dirstatemap(object):
1318 1318 def __init__(self):
1319 1319 self._map = {}
1320 1320
1321 1321 def iteritems(self):
1322 1322 return self._map.iteritems()
1323 1323
1324 1324 def __iter__(self):
1325 1325 return iter(self._map)
1326 1326
1327 1327 def get(self, key, default=None):
1328 1328 return self._map.get(key, default)
1329 1329
1330 1330 def __contains__(self, key):
1331 1331 return key in self._map
1332 1332
1333 1333 def __setitem__(self, key, value):
1334 1334 self._map[key] = value
1335 1335
1336 1336 def __getitem__(self, key):
1337 1337 return self._map[key]
1338 1338
1339 1339 def __delitem__(self, key):
1340 1340 del self._map[key]
1341 1341
1342 1342 def keys(self):
1343 1343 return self._map.keys()
1344 1344
1345 1345 def nonnormalentries(self):
1346 1346 '''Compute the nonnormal dirstate entries from the dmap'''
1347 1347 try:
1348 1348 return parsers.nonnormalotherparententries(self._map)
1349 1349 except AttributeError:
1350 1350 nonnorm = set()
1351 1351 otherparent = set()
1352 1352 for fname, e in self._map.iteritems():
1353 1353 if e[0] != 'n' or e[3] == -1:
1354 1354 nonnorm.add(fname)
1355 1355 if e[0] == 'n' and e[2] == -2:
1356 1356 otherparent.add(fname)
1357 1357 return nonnorm, otherparent
1358 1358
1359 1359 def filefoldmap(self):
1360 1360 """Returns a dictionary mapping normalized case paths to their
1361 1361 non-normalized versions.
1362 1362 """
1363 1363 try:
1364 1364 makefilefoldmap = parsers.make_file_foldmap
1365 1365 except AttributeError:
1366 1366 pass
1367 1367 else:
1368 1368 return makefilefoldmap(self._map, util.normcasespec,
1369 1369 util.normcasefallback)
1370 1370
1371 1371 f = {}
1372 1372 normcase = util.normcase
1373 1373 for name, s in self._map.iteritems():
1374 1374 if s[0] != 'r':
1375 1375 f[normcase(name)] = name
1376 1376 f['.'] = '.' # prevents useless util.fspath() invocation
1377 1377 return f
1378
1379 def dirs(self):
1380 """Returns a set-like object containing all the directories in the
1381 current dirstate.
1382 """
1383 return util.dirs(self._map, 'r')
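A side note on the caching around _dirs: invalidate() and clear() can drop the property with delattr / "_dirs" in self.__dict__ checks, and _addpath()/_droppath() only maintain the structure when it is already cached, because util.propertycache stores the computed value in the instance dictionary on first access and thereby shadows the descriptor until the cached value is deleted. The snippet below is a small sketch of that caching pattern; propertycache_sketch is an illustrative reimplementation, not Mercurial's real util.propertycache.

class propertycache_sketch(object):
    """Non-data descriptor: compute once, then cache in the instance dict."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        value = self.func(obj)
        obj.__dict__[self.name] = value    # later lookups bypass the descriptor
        return value

class demo(object):
    calls = 0

    @propertycache_sketch
    def _dirs(self):
        demo.calls += 1
        return {'a', 'a/b'}

d = demo()
d._dirs and d._dirs               # computed once, then served from __dict__
assert demo.calls == 1
del d.__dict__['_dirs']           # roughly what invalidate() and clear() do
d._dirs
assert demo.calls == 2            # recomputed on the next access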