##// END OF EJS Templates
dirstate: move _copymap to dirstatemap...
Durham Goode -
r34337:0865d25e default
parent child Browse files
Show More
@@ -1,1383 +1,1377 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
# Parser implementation selected by policy (C extension vs. pure Python).
parsers = policy.importmod(r'parsers')

propertycache = util.propertycache
filecache = scmutil.filecache
# Mask applied to size/mtime values so they fit the signed 32-bit fields
# of the on-disk dirstate format (see normal() and _writedirstate()).
_rangemask = 0x7fffffff

dirstatetuple = parsers.dirstatetuple
37 37
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        opener = obj._opener
        return opener.join(fname)
42 42
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # resolve fname relative to the tracked working-directory root
        return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk
        self._dirty = False
        # True when the parents were changed in memory (see setparents())
        self._dirtypl = False
        # mtime of the most recently normal()-ed file; used by status()
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts; nonzero blocks writes
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as of the last write, for addparentchangecallback()
        self._origpl = None
        # files touched since last write; used for timestamp fixups
        self._updatedfiles = set()

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
87 87
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        # a nonzero _parentwriters makes pendingparentchange() report True
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
104 104
105 105 def beginparentchange(self):
106 106 '''Marks the beginning of a set of changes that involve changing
107 107 the dirstate parents. If there is an exception during this time,
108 108 the dirstate will not be written when the wlock is released. This
109 109 prevents writing an incoherent dirstate where the parent doesn't
110 110 match the contents.
111 111 '''
112 112 self._ui.deprecwarn('beginparentchange is obsoleted by the '
113 113 'parentchange context manager.', '4.3')
114 114 self._parentwriters += 1
115 115
116 116 def endparentchange(self):
117 117 '''Marks the end of a set of changes that involve changing the
118 118 dirstate parents. Once all parent changes have been marked done,
119 119 the wlock will be free to write the dirstate on release.
120 120 '''
121 121 self._ui.deprecwarn('endparentchange is obsoleted by the '
122 122 'parentchange context manager.', '4.3')
123 123 if self._parentwriters > 0:
124 124 self._parentwriters -= 1
125 125
126 126 def pendingparentchange(self):
127 127 '''Returns true if the dirstate is in the middle of a set of changes
128 128 that modify the dirstate parent.
129 129 '''
130 130 return self._parentwriters > 0
131 131
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # Lazy load: _read() assigns the populated dirstatemap to
        # self._map, replacing this propertycache entry, so the
        # re-read of the attribute below returns the real map.
        self._read()
        return self._map
138 138
139 139 @propertycache
140 def _copymap(self):
141 self._read()
142 return self._copymap
143
144 @propertycache
145 140 def _identity(self):
146 141 self._read()
147 142 return self._identity
148 143
149 144 @propertycache
150 145 def _nonnormalset(self):
151 146 nonnorm, otherparents = self._map.nonnormalentries()
152 147 self._otherparentset = otherparents
153 148 return nonnorm
154 149
155 150 @propertycache
156 151 def _otherparentset(self):
157 152 nonnorm, otherparents = self._map.nonnormalentries()
158 153 self._nonnormalset = nonnorm
159 154 return otherparents
160 155
161 156 @propertycache
162 157 def _filefoldmap(self):
163 158 return self._map.filefoldmap()
164 159
165 160 @propertycache
166 161 def _dirfoldmap(self):
167 162 f = {}
168 163 normcase = util.normcase
169 164 for name in self._dirs:
170 165 f[normcase(name)] = name
171 166 return f
172 167
173 168 @property
174 169 def _sparsematcher(self):
175 170 """The matcher for the sparse checkout.
176 171
177 172 The working directory may not include every file from a manifest. The
178 173 matcher obtained by this property will match a path if it is to be
179 174 included in the working directory.
180 175 """
181 176 # TODO there is potential to cache this property. For now, the matcher
182 177 # is resolved on every access. (But the called function does use a
183 178 # cache to keep the lookup fast.)
184 179 return self._sparsematchfn()
185 180
186 181 @repocache('branch')
187 182 def _branch(self):
188 183 try:
189 184 return self._opener.read("branch").strip() or "default"
190 185 except IOError as inst:
191 186 if inst.errno != errno.ENOENT:
192 187 raise
193 188 return "default"
194 189
195 190 @propertycache
196 191 def _pl(self):
197 192 try:
198 193 fp = self._opendirstatefile()
199 194 st = fp.read(40)
200 195 fp.close()
201 196 l = len(st)
202 197 if l == 40:
203 198 return st[:20], st[20:40]
204 199 elif l > 0 and l < 40:
205 200 raise error.Abort(_('working directory state appears damaged!'))
206 201 except IOError as err:
207 202 if err.errno != errno.ENOENT:
208 203 raise
209 204 return [nullid, nullid]
210 205
211 206 @propertycache
212 207 def _dirs(self):
213 208 return self._map.dirs()
214 209
215 210 def dirs(self):
216 211 return self._dirs
217 212
218 213 @rootcache('.hgignore')
219 214 def _ignore(self):
220 215 files = self._ignorefiles()
221 216 if not files:
222 217 return matchmod.never(self._root, '')
223 218
224 219 pats = ['include:%s' % f for f in files]
225 220 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
226 221
227 222 @propertycache
228 223 def _slash(self):
229 224 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
230 225
231 226 @propertycache
232 227 def _checklink(self):
233 228 return util.checklink(self._root)
234 229
235 230 @propertycache
236 231 def _checkexec(self):
237 232 return util.checkexec(self._root)
238 233
239 234 @propertycache
240 235 def _checkcase(self):
241 236 return not util.fscasesensitive(self._join('.hg'))
242 237
243 238 def _join(self, f):
244 239 # much faster than os.path.join()
245 240 # it's safe because f is always a relative path
246 241 return self._rootdir + f
247 242
248 243 def flagfunc(self, buildfallback):
249 244 if self._checklink and self._checkexec:
250 245 def f(x):
251 246 try:
252 247 st = os.lstat(self._join(x))
253 248 if util.statislink(st):
254 249 return 'l'
255 250 if util.statisexec(st):
256 251 return 'x'
257 252 except OSError:
258 253 pass
259 254 return ''
260 255 return f
261 256
262 257 fallback = buildfallback()
263 258 if self._checklink:
264 259 def f(x):
265 260 if os.path.islink(self._join(x)):
266 261 return 'l'
267 262 if 'x' in fallback(x):
268 263 return 'x'
269 264 return ''
270 265 return f
271 266 if self._checkexec:
272 267 def f(x):
273 268 if 'l' in fallback(x):
274 269 return 'l'
275 270 if util.isexec(self._join(x)):
276 271 return 'x'
277 272 return ''
278 273 return f
279 274 else:
280 275 return fallback
281 276
282 277 @propertycache
283 278 def _cwd(self):
284 279 # internal config: ui.forcecwd
285 280 forcecwd = self._ui.config('ui', 'forcecwd')
286 281 if forcecwd:
287 282 return forcecwd
288 283 return pycompat.getcwd()
289 284
290 285 def getcwd(self):
291 286 '''Return the path from which a canonical path is calculated.
292 287
293 288 This path should be used to resolve file patterns or to convert
294 289 canonical paths back to file paths for display. It shouldn't be
295 290 used to get real file paths. Use vfs functions instead.
296 291 '''
297 292 cwd = self._cwd
298 293 if cwd == self._root:
299 294 return ''
300 295 # self._root ends with a path separator if self._root is '/' or 'C:\'
301 296 rootsep = self._root
302 297 if not util.endswithsep(rootsep):
303 298 rootsep += pycompat.ossep
304 299 if cwd.startswith(rootsep):
305 300 return cwd[len(rootsep):]
306 301 else:
307 302 # we're outside the repo. return an absolute path.
308 303 return cwd
309 304
310 305 def pathto(self, f, cwd=None):
311 306 if cwd is None:
312 307 cwd = self.getcwd()
313 308 path = util.pathto(self._root, cwd, f)
314 309 if self._slash:
315 310 return util.pconvert(path)
316 311 return path
317 312
318 313 def __getitem__(self, key):
319 314 '''Return the current state of key (a filename) in the dirstate.
320 315
321 316 States are:
322 317 n normal
323 318 m needs merging
324 319 r marked for removal
325 320 a marked for addition
326 321 ? not tracked
327 322 '''
328 323 return self._map.get(key, ("?",))[0]
329 324
330 325 def __contains__(self, key):
331 326 return key in self._map
332 327
333 328 def __iter__(self):
334 329 return iter(sorted(self._map))
335 330
336 331 def items(self):
337 332 return self._map.iteritems()
338 333
339 334 iteritems = items
340 335
341 336 def parents(self):
342 337 return [self._validate(p) for p in self._pl]
343 338
344 339 def p1(self):
345 340 return self._validate(self._pl[0])
346 341
347 342 def p2(self):
348 343 return self._validate(self._pl[1])
349 344
350 345 def branch(self):
351 346 return encoding.tolocal(self._branch)
352 347
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # dropping the second parent: only non-normal / otherparent
            # entries can carry merge or otherparent markers
            candidatefiles = self._nonnormalset.union(self._otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
392 387
393 388 def setbranch(self, branch):
394 389 self._branch = encoding.fromlocal(branch)
395 390 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
396 391 try:
397 392 f.write(self._branch + '\n')
398 393 f.close()
399 394
400 395 # make sure filecache has the correct stat info for _branch after
401 396 # replacing the underlying file
402 397 ce = self._filecache['_branch']
403 398 if ce:
404 399 ce.refresh()
405 400 except: # re-raises
406 401 f.discard()
407 402 raise
408 403
409 404 def _opendirstatefile(self):
410 405 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
411 406 if self._pendingmode is not None and self._pendingmode != mode:
412 407 fp.close()
413 408 raise error.Abort(_('working directory state may be '
414 409 'changed parallelly'))
415 410 self._pendingmode = mode
416 411 return fp
417 412
    def _read(self):
        """Parse the on-disk dirstate file into self._map.

        Also records the file's identity for later change detection and,
        unless the parents were already changed in memory (_dirtypl),
        sets self._pl from the parsed header.
        """
        self._map = dirstatemap()

        # ignore HG_PENDING because identity is used only for writing
        self._identity = util.filestat.frompath(
            self._opener.join(self._filename))
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # no dirstate file yet: leave the map empty
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map._map, self._map.copymap, st)
        if not self._dirtypl:
            self._pl = p
467 461
    def invalidate(self):
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

        # drop every lazily-computed cache so it is rebuilt on next access
        for a in ("_map", "_identity",
                  "_filefoldmap", "_dirfoldmap", "_branch",
                  "_pl", "_dirs", "_ignore", "_nonnormalset",
                  "_otherparentset"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
486 480
487 481 def copy(self, source, dest):
488 482 """Mark dest as a copy of source. Unmark dest if source is None."""
489 483 if source == dest:
490 484 return
491 485 self._dirty = True
492 486 if source is not None:
493 self._copymap[dest] = source
487 self._map.copymap[dest] = source
494 488 self._updatedfiles.add(source)
495 489 self._updatedfiles.add(dest)
496 elif self._copymap.pop(dest, None):
490 elif self._map.copymap.pop(dest, None):
497 491 self._updatedfiles.add(dest)
498 492
499 493 def copied(self, file):
500 return self._copymap.get(file, None)
494 return self._map.copymap.get(file, None)
501 495
502 496 def copies(self):
503 return self._copymap
497 return self._map.copymap
504 498
505 499 def _droppath(self, f):
506 500 if self[f] not in "?r" and "_dirs" in self.__dict__:
507 501 self._dirs.delpath(f)
508 502
509 503 if "_filefoldmap" in self.__dict__:
510 504 normed = util.normcase(f)
511 505 if normed in self._filefoldmap:
512 506 del self._filefoldmap[normed]
513 507
514 508 self._updatedfiles.add(f)
515 509
    def _addpath(self, f, state, mode, size, mtime):
        """Insert or refresh the entry for f with the given state tuple.

        Validates the filename against existing files/directories when the
        file becomes tracked, and keeps the _dirs, _nonnormalset and
        _otherparentset caches in sync with the new entry.
        """
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        # mtime of -1 marks "needs lookup"; such entries are non-normal
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
        # size of -2 marks an entry coming from the other merge parent
        if size == -2:
            self._otherparentset.add(f)
539 533
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # size/mtime are masked to fit the 31-bit on-disk fields
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        # a clean file can no longer be a copy destination
        self._map.copymap.pop(f, None)
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
554 548
555 549 def normallookup(self, f):
556 550 '''Mark a file normal, but possibly dirty.'''
557 551 if self._pl[1] != nullid:
558 552 # if there is a merge going on and the file was either
559 553 # in state 'm' (-1) or coming from other parent (-2) before
560 554 # being removed, restore that state.
561 555 entry = self._map.get(f)
562 556 if entry is not None:
563 557 if entry[0] == 'r' and entry[2] in (-1, -2):
564 source = self._copymap.get(f)
558 source = self._map.copymap.get(f)
565 559 if entry[2] == -1:
566 560 self.merge(f)
567 561 elif entry[2] == -2:
568 562 self.otherparent(f)
569 563 if source:
570 564 self.copy(source, f)
571 565 return
572 566 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
573 567 return
574 568 self._addpath(f, 'n', 0, -1, -1)
575 self._copymap.pop(f, None)
569 self._map.copymap.pop(f, None)
576 570 if f in self._nonnormalset:
577 571 self._nonnormalset.remove(f)
578 572
579 573 def otherparent(self, f):
580 574 '''Mark as coming from the other parent, always dirty.'''
581 575 if self._pl[1] == nullid:
582 576 raise error.Abort(_("setting %r to other parent "
583 577 "only allowed in merges") % f)
584 578 if f in self and self[f] == 'n':
585 579 # merge-like
586 580 self._addpath(f, 'm', 0, -2, -1)
587 581 else:
588 582 # add-like
589 583 self._addpath(f, 'n', 0, -2, -1)
590 self._copymap.pop(f, None)
584 self._map.copymap.pop(f, None)
591 585
592 586 def add(self, f):
593 587 '''Mark a file added.'''
594 588 self._addpath(f, 'a', 0, -1, -1)
595 self._copymap.pop(f, None)
589 self._map.copymap.pop(f, None)
596 590
597 591 def remove(self, f):
598 592 '''Mark a file removed.'''
599 593 self._dirty = True
600 594 self._droppath(f)
601 595 size = 0
602 596 if self._pl[1] != nullid:
603 597 entry = self._map.get(f)
604 598 if entry is not None:
605 599 # backup the previous state
606 600 if entry[0] == 'm': # merge
607 601 size = -1
608 602 elif entry[0] == 'n' and entry[2] == -2: # other parent
609 603 size = -2
610 604 self._otherparentset.add(f)
611 605 self._map[f] = dirstatetuple('r', 0, size, 0)
612 606 self._nonnormalset.add(f)
613 607 if size == 0:
614 self._copymap.pop(f, None)
608 self._map.copymap.pop(f, None)
615 609
616 610 def merge(self, f):
617 611 '''Mark a file merged.'''
618 612 if self._pl[1] == nullid:
619 613 return self.normallookup(f)
620 614 return self.otherparent(f)
621 615
622 616 def drop(self, f):
623 617 '''Drop a file from the dirstate'''
624 618 if f in self._map:
625 619 self._dirty = True
626 620 self._droppath(f)
627 621 del self._map[f]
628 622 if f in self._nonnormalset:
629 623 self._nonnormalset.remove(f)
630 self._copymap.pop(f, None)
624 self._map.copymap.pop(f, None)
631 625
632 626 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
633 627 if exists is None:
634 628 exists = os.path.lexists(os.path.join(self._root, path))
635 629 if not exists:
636 630 # Maybe a path component exists
637 631 if not ignoremissing and '/' in path:
638 632 d, f = path.rsplit('/', 1)
639 633 d = self._normalize(d, False, ignoremissing, None)
640 634 folded = d + "/" + f
641 635 else:
642 636 # No path components, preserve original case
643 637 folded = path
644 638 else:
645 639 # recursively normalize leading directory components
646 640 # against dirstate
647 641 if '/' in normed:
648 642 d, f = normed.rsplit('/', 1)
649 643 d = self._normalize(d, False, ignoremissing, True)
650 644 r = self._root + "/" + d
651 645 folded = d + "/" + util.fspath(f, r)
652 646 else:
653 647 folded = util.fspath(normed, self._root)
654 648 storemap[normed] = folded
655 649
656 650 return folded
657 651
658 652 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
659 653 normed = util.normcase(path)
660 654 folded = self._filefoldmap.get(normed, None)
661 655 if folded is None:
662 656 if isknown:
663 657 folded = path
664 658 else:
665 659 folded = self._discoverpath(path, normed, ignoremissing, exists,
666 660 self._filefoldmap)
667 661 return folded
668 662
669 663 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
670 664 normed = util.normcase(path)
671 665 folded = self._filefoldmap.get(normed, None)
672 666 if folded is None:
673 667 folded = self._dirfoldmap.get(normed, None)
674 668 if folded is None:
675 669 if isknown:
676 670 folded = path
677 671 else:
678 672 # store discovered result in dirfoldmap so that future
679 673 # normalizefile calls don't start matching directories
680 674 folded = self._discoverpath(path, normed, ignoremissing, exists,
681 675 self._dirfoldmap)
682 676 return folded
683 677
684 678 def normalize(self, path, isknown=False, ignoremissing=False):
685 679 '''
686 680 normalize the case of a pathname when on a casefolding filesystem
687 681
688 682 isknown specifies whether the filename came from walking the
689 683 disk, to avoid extra filesystem access.
690 684
691 685 If ignoremissing is True, missing path are returned
692 686 unchanged. Otherwise, we try harder to normalize possibly
693 687 existing path components.
694 688
695 689 The normalized case is determined based on the following precedence:
696 690
697 691 - version of name already stored in the dirstate
698 692 - version of name stored on disk
699 693 - version provided via command arguments
700 694 '''
701 695
702 696 if self._checkcase:
703 697 return self._normalize(path, isknown, ignoremissing)
704 698 return path
705 699
706 700 def clear(self):
707 701 self._map = dirstatemap()
708 702 self._nonnormalset = set()
709 703 self._otherparentset = set()
710 704 if "_dirs" in self.__dict__:
711 705 delattr(self, "_dirs")
712 self._copymap = {}
713 706 self._pl = [nullid, nullid]
714 707 self._lastnormaltime = 0
715 708 self._updatedfiles.clear()
716 709 self._dirty = True
717 710
718 711 def rebuild(self, parent, allfiles, changedfiles=None):
719 712 if changedfiles is None:
720 713 # Rebuild entire dirstate
721 714 changedfiles = allfiles
722 715 lastnormaltime = self._lastnormaltime
723 716 self.clear()
724 717 self._lastnormaltime = lastnormaltime
725 718
726 719 if self._origpl is None:
727 720 self._origpl = self._pl
728 721 self._pl = (parent, nullid)
729 722 for f in changedfiles:
730 723 if f in allfiles:
731 724 self.normallookup(f)
732 725 else:
733 726 self.drop(f)
734 727
735 728 self._dirty = True
736 729
737 730 def identity(self):
738 731 '''Return identity of dirstate itself to detect changing in storage
739 732
740 733 If identity of previous dirstate is equal to this, writing
741 734 changes based on the former dirstate out can keep consistency.
742 735 '''
743 736 return self._identity
744 737
745 738 def write(self, tr):
746 739 if not self._dirty:
747 740 return
748 741
749 742 filename = self._filename
750 743 if tr:
751 744 # 'dirstate.write()' is not only for writing in-memory
752 745 # changes out, but also for dropping ambiguous timestamp.
753 746 # delayed writing re-raise "ambiguous timestamp issue".
754 747 # See also the wiki page below for detail:
755 748 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
756 749
757 750 # emulate dropping timestamp in 'parsers.pack_dirstate'
758 751 now = _getfsnow(self._opener)
759 752 dmap = self._map
760 753 for f in self._updatedfiles:
761 754 e = dmap.get(f)
762 755 if e is not None and e[0] == 'n' and e[3] == now:
763 756 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
764 757 self._nonnormalset.add(f)
765 758
766 759 # emulate that all 'dirstate.normal' results are written out
767 760 self._lastnormaltime = 0
768 761 self._updatedfiles.clear()
769 762
770 763 # delay writing in-memory changes out
771 764 tr.addfilegenerator('dirstate', (self._filename,),
772 765 self._writedirstate, location='plain')
773 766 return
774 767
775 768 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
776 769 self._writedirstate(st)
777 770
778 771 def addparentchangecallback(self, category, callback):
779 772 """add a callback to be called when the wd parents are changed
780 773
781 774 Callback will be called with the following arguments:
782 775 dirstate, (oldp1, oldp2), (newp1, newp2)
783 776
784 777 Category is a unique identifier to allow overwriting an old callback
785 778 with a newer callback.
786 779 """
787 780 self._plchangecallbacks[category] = callback
788 781
    def _writedirstate(self, st):
        """Serialize the dirstate into the open file object st.

        Fires parent-change callbacks, computes the filesystem 'now', and
        honours the debug.dirstate.delaywrite knob before packing the map.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        st.write(parsers.pack_dirstate(self._map._map, self._map.copymap,
                                       self._pl, now))
        # pack_dirstate may have dropped ambiguous timestamps; rebuild the
        # nonnormal/otherparent caches from the (possibly updated) map
        self._nonnormalset, self._otherparentset = self._map.nonnormalentries()
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
822 815
823 816 def _dirignore(self, f):
824 817 if f == '.':
825 818 return False
826 819 if self._ignore(f):
827 820 return True
828 821 for p in util.finddirs(f):
829 822 if self._ignore(p):
830 823 return True
831 824 return False
832 825
833 826 def _ignorefiles(self):
834 827 files = []
835 828 if os.path.exists(self._join('.hgignore')):
836 829 files.append(self._join('.hgignore'))
837 830 for name, path in self._ui.configitems("ui"):
838 831 if name == 'ignore' or name.startswith('ignore.'):
839 832 # we need to use os.path.join here rather than self._join
840 833 # because path is arbitrary and user-specified
841 834 files.append(os.path.join(self._rootdir, util.expandpath(path)))
842 835 return files
843 836
844 837 def _ignorefileandline(self, f):
845 838 files = collections.deque(self._ignorefiles())
846 839 visited = set()
847 840 while files:
848 841 i = files.popleft()
849 842 patterns = matchmod.readpatternfile(i, self._ui.warn,
850 843 sourceinfo=True)
851 844 for pattern, lineno, line in patterns:
852 845 kind, p = matchmod._patsplit(pattern, 'glob')
853 846 if kind == "subinclude":
854 847 if p not in visited:
855 848 files.append(p)
856 849 continue
857 850 m = matchmod.match(self._root, '', [], [pattern],
858 851 warn=self._ui.warn)
859 852 if m(f):
860 853 return (i, lineno, line)
861 854 visited.add(i)
862 855 return (None, -1, "")
863 856
864 857 def _walkexplicit(self, match, subrepos):
865 858 '''Get stat data about the files explicitly specified by match.
866 859
867 860 Return a triple (results, dirsfound, dirsnotfound).
868 861 - results is a mapping from filename to stat result. It also contains
869 862 listings mapping subrepos and .hg to None.
870 863 - dirsfound is a list of files found to be directories.
871 864 - dirsnotfound is a list of files that the dirstate thinks are
872 865 directories and that were not found.'''
873 866
874 867 def badtype(mode):
875 868 kind = _('unknown')
876 869 if stat.S_ISCHR(mode):
877 870 kind = _('character device')
878 871 elif stat.S_ISBLK(mode):
879 872 kind = _('block device')
880 873 elif stat.S_ISFIFO(mode):
881 874 kind = _('fifo')
882 875 elif stat.S_ISSOCK(mode):
883 876 kind = _('socket')
884 877 elif stat.S_ISDIR(mode):
885 878 kind = _('directory')
886 879 return _('unsupported file type (type is %s)') % kind
887 880
888 881 matchedir = match.explicitdir
889 882 badfn = match.bad
890 883 dmap = self._map
891 884 lstat = os.lstat
892 885 getkind = stat.S_IFMT
893 886 dirkind = stat.S_IFDIR
894 887 regkind = stat.S_IFREG
895 888 lnkkind = stat.S_IFLNK
896 889 join = self._join
897 890 dirsfound = []
898 891 foundadd = dirsfound.append
899 892 dirsnotfound = []
900 893 notfoundadd = dirsnotfound.append
901 894
902 895 if not match.isexact() and self._checkcase:
903 896 normalize = self._normalize
904 897 else:
905 898 normalize = None
906 899
907 900 files = sorted(match.files())
908 901 subrepos.sort()
909 902 i, j = 0, 0
910 903 while i < len(files) and j < len(subrepos):
911 904 subpath = subrepos[j] + "/"
912 905 if files[i] < subpath:
913 906 i += 1
914 907 continue
915 908 while i < len(files) and files[i].startswith(subpath):
916 909 del files[i]
917 910 j += 1
918 911
919 912 if not files or '.' in files:
920 913 files = ['.']
921 914 results = dict.fromkeys(subrepos)
922 915 results['.hg'] = None
923 916
924 917 alldirs = None
925 918 for ff in files:
926 919 # constructing the foldmap is expensive, so don't do it for the
927 920 # common case where files is ['.']
928 921 if normalize and ff != '.':
929 922 nf = normalize(ff, False, True)
930 923 else:
931 924 nf = ff
932 925 if nf in results:
933 926 continue
934 927
935 928 try:
936 929 st = lstat(join(nf))
937 930 kind = getkind(st.st_mode)
938 931 if kind == dirkind:
939 932 if nf in dmap:
940 933 # file replaced by dir on disk but still in dirstate
941 934 results[nf] = None
942 935 if matchedir:
943 936 matchedir(nf)
944 937 foundadd((nf, ff))
945 938 elif kind == regkind or kind == lnkkind:
946 939 results[nf] = st
947 940 else:
948 941 badfn(ff, badtype(kind))
949 942 if nf in dmap:
950 943 results[nf] = None
951 944 except OSError as inst: # nf not found on disk - it is dirstate only
952 945 if nf in dmap: # does it exactly match a missing file?
953 946 results[nf] = None
954 947 else: # does it match a missing directory?
955 948 if alldirs is None:
956 949 alldirs = util.dirs(dmap._map)
957 950 if nf in alldirs:
958 951 if matchedir:
959 952 matchedir(nf)
960 953 notfoundadd(nf)
961 954 else:
962 955 badfn(ff, encoding.strtolocal(inst.strerror))
963 956
964 957 # Case insensitive filesystems cannot rely on lstat() failing to detect
965 958 # a case-only rename. Prune the stat object for any file that does not
966 959 # match the case in the filesystem, if there are multiple files that
967 960 # normalize to the same path.
968 961 if match.isexact() and self._checkcase:
969 962 normed = {}
970 963
971 964 for f, st in results.iteritems():
972 965 if st is None:
973 966 continue
974 967
975 968 nc = util.normcase(f)
976 969 paths = normed.get(nc)
977 970
978 971 if paths is None:
979 972 paths = set()
980 973 normed[nc] = paths
981 974
982 975 paths.add(f)
983 976
984 977 for norm, paths in normed.iteritems():
985 978 if len(paths) > 1:
986 979 for path in paths:
987 980 folded = self._discoverpath(path, norm, True, None,
988 981 self._dirfoldmap)
989 982 if path != folded:
990 983 results[path] = None
991 984
992 985 return results, dirsfound, dirsnotfound
993 986
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates up front: when listing ignored files
        # nothing is treated as ignored; when listing neither unknown nor
        # ignored files, everything is, which short-circuits the traversal.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind hot attributes and functions to locals: the loops below run
        # once per directory entry in the working copy.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                if not match.visitdir(nd):
                    continue
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable or vanished directory: report through the
                    # matcher's bad-file callback and keep walking
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd),
                                  encoding.strtolocal(inst.strerror))
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1158 1151
1159 1152 def status(self, match, subrepos, ignored, clean, unknown):
1160 1153 '''Determine the status of the working copy relative to the
1161 1154 dirstate and return a pair of (unsure, status), where status is of type
1162 1155 scmutil.status and:
1163 1156
1164 1157 unsure:
1165 1158 files that might have been modified since the dirstate was
1166 1159 written, but need to be read to be sure (size is the same
1167 1160 but mtime differs)
1168 1161 status.modified:
1169 1162 files that have definitely been modified since the dirstate
1170 1163 was written (different size or mode)
1171 1164 status.clean:
1172 1165 files that have definitely not been modified since the
1173 1166 dirstate was written
1174 1167 '''
1175 1168 listignored, listclean, listunknown = ignored, clean, unknown
1176 1169 lookup, modified, added, unknown, ignored = [], [], [], [], []
1177 1170 removed, deleted, clean = [], [], []
1178 1171
1179 1172 dmap = self._map
1180 1173 ladd = lookup.append # aka "unsure"
1181 1174 madd = modified.append
1182 1175 aadd = added.append
1183 1176 uadd = unknown.append
1184 1177 iadd = ignored.append
1185 1178 radd = removed.append
1186 1179 dadd = deleted.append
1187 1180 cadd = clean.append
1188 1181 mexact = match.exact
1189 1182 dirignore = self._dirignore
1190 1183 checkexec = self._checkexec
1191 copymap = self._copymap
1184 copymap = self._map.copymap
1192 1185 lastnormaltime = self._lastnormaltime
1193 1186
1194 1187 # We need to do full walks when either
1195 1188 # - we're listing all clean files, or
1196 1189 # - match.traversedir does something, because match.traversedir should
1197 1190 # be called for every dir in the working dir
1198 1191 full = listclean or match.traversedir is not None
1199 1192 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1200 1193 full=full).iteritems():
1201 1194 if fn not in dmap:
1202 1195 if (listignored or mexact(fn)) and dirignore(fn):
1203 1196 if listignored:
1204 1197 iadd(fn)
1205 1198 else:
1206 1199 uadd(fn)
1207 1200 continue
1208 1201
1209 1202 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1210 1203 # written like that for performance reasons. dmap[fn] is not a
1211 1204 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1212 1205 # opcode has fast paths when the value to be unpacked is a tuple or
1213 1206 # a list, but falls back to creating a full-fledged iterator in
1214 1207 # general. That is much slower than simply accessing and storing the
1215 1208 # tuple members one by one.
1216 1209 t = dmap[fn]
1217 1210 state = t[0]
1218 1211 mode = t[1]
1219 1212 size = t[2]
1220 1213 time = t[3]
1221 1214
1222 1215 if not st and state in "nma":
1223 1216 dadd(fn)
1224 1217 elif state == 'n':
1225 1218 if (size >= 0 and
1226 1219 ((size != st.st_size and size != st.st_size & _rangemask)
1227 1220 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1228 1221 or size == -2 # other parent
1229 1222 or fn in copymap):
1230 1223 madd(fn)
1231 1224 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1232 1225 ladd(fn)
1233 1226 elif st.st_mtime == lastnormaltime:
1234 1227 # fn may have just been marked as normal and it may have
1235 1228 # changed in the same second without changing its size.
1236 1229 # This can happen if we quickly do multiple commits.
1237 1230 # Force lookup, so we don't miss such a racy file change.
1238 1231 ladd(fn)
1239 1232 elif listclean:
1240 1233 cadd(fn)
1241 1234 elif state == 'm':
1242 1235 madd(fn)
1243 1236 elif state == 'a':
1244 1237 aadd(fn)
1245 1238 elif state == 'r':
1246 1239 radd(fn)
1247 1240
1248 1241 return (lookup, scmutil.status(modified, added, removed, deleted,
1249 1242 unknown, ignored, clean))
1250 1243
1251 1244 def matches(self, match):
1252 1245 '''
1253 1246 return files in the dirstate (in whatever state) filtered by match
1254 1247 '''
1255 1248 dmap = self._map
1256 1249 if match.always():
1257 1250 return dmap.keys()
1258 1251 files = match.files()
1259 1252 if match.isexact():
1260 1253 # fast path -- filter the other way around, since typically files is
1261 1254 # much smaller than dmap
1262 1255 return [f for f in files if f in dmap]
1263 1256 if match.prefix() and all(fn in dmap for fn in files):
1264 1257 # fast path -- all the values are known to be files, so just return
1265 1258 # that
1266 1259 return list(files)
1267 1260 return [f for f in dmap if match(f)]
1268 1261
1269 1262 def _actualfilename(self, tr):
1270 1263 if tr:
1271 1264 return self._pendingfilename
1272 1265 else:
1273 1266 return self._filename
1274 1267
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        # When a transaction is running, the live state is the pending file.
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1304 1297
1305 1298 def restorebackup(self, tr, backupname):
1306 1299 '''Restore dirstate by backup file'''
1307 1300 # this "invalidate()" prevents "wlock.release()" from writing
1308 1301 # changes of dirstate out after restoring from backup file
1309 1302 self.invalidate()
1310 1303 filename = self._actualfilename(tr)
1311 1304 self._opener.rename(backupname, filename, checkambig=True)
1312 1305
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # 'tr' is unused here; it is accepted for interface symmetry with
        # savebackup()/restorebackup().
        self._opener.unlink(backupname)
1316 1309
class dirstatemap(object):
    """Dict-like container holding the filename -> dirstate entry map.

    Besides the main map it carries ``copymap``, which records the copy
    source for copied/renamed files tracked in the map.
    """

    def __init__(self):
        # filename -> dirstate entry (state, mode, size, mtime)
        self._map = {}
        # destination filename -> source filename for copies/renames
        self.copymap = {}

    # -- plain mapping protocol, delegated to the underlying dict --------

    def iteritems(self):
        return self._map.iteritems()

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __setitem__(self, key, value):
        self._map[key] = value

    def __getitem__(self, key):
        return self._map[key]

    def __delitem__(self, key):
        del self._map[key]

    def keys(self):
        return self._map.keys()

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            # fast path: C implementation, when available
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, entry in self._map.iteritems():
                if entry[0] != 'n' or entry[3] == -1:
                    nonnorm.add(fname)
                if entry[0] == 'n' and entry[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # fast path: C implementation, when available
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        normcase = util.normcase
        foldmap = {}
        for name, entry in self._map.iteritems():
            # skip removed ('r') files; they are no longer in the working copy
            if entry[0] != 'r':
                foldmap[normcase(name)] = name
        foldmap['.'] = '.' # prevents useless util.fspath() invocation
        return foldmap

    def dirs(self):
        """Returns a set-like object containing all the directories in the
        current dirstate.
        """
        return util.dirs(self._map, 'r')
General Comments 0
You need to be logged in to leave comments. Login now