dirstate: move nonnormalentries to dirstatemap...
Durham Goode
r34334:4ac04418 default
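
In short, this change deletes the module-level nonnormalentries(dmap) helper and reintroduces it as a method on the new dirstatemap class, so dirstate's cached properties and _writedirstate ask the map object directly. A minimal before/after sketch of the affected call sites (Python, mirroring the hunks below; the surrounding dirstate plumbing is elided):

    # before this change: free function that takes the map object
    nonnorm, otherparents = nonnormalentries(self._map)

    # after this change: method on the dirstatemap instance
    nonnorm, otherparents = self._map.nonnormalentries()
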
@@ -1,1371 +1,1372 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 def nonnormalentries(dmap):
58 '''Compute the nonnormal dirstate entries from the dmap'''
59 try:
60 return parsers.nonnormalotherparententries(dmap._map)
61 except AttributeError:
62 nonnorm = set()
63 otherparent = set()
64 for fname, e in dmap.iteritems():
65 if e[0] != 'n' or e[3] == -1:
66 nonnorm.add(fname)
67 if e[0] == 'n' and e[2] == -2:
68 otherparent.add(fname)
69 return nonnorm, otherparent
70
71 57 class dirstate(object):
72 58
73 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 60 '''Create a new dirstate object.
75 61
76 62 opener is an open()-like callable that can be used to open the
77 63 dirstate file; root is the root of the directory tracked by
78 64 the dirstate.
79 65 '''
80 66 self._opener = opener
81 67 self._validate = validate
82 68 self._root = root
83 69 self._sparsematchfn = sparsematchfn
84 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 71 # UNC path pointing to root share (issue4557)
86 72 self._rootdir = pathutil.normasprefix(root)
87 73 self._dirty = False
88 74 self._dirtypl = False
89 75 self._lastnormaltime = 0
90 76 self._ui = ui
91 77 self._filecache = {}
92 78 self._parentwriters = 0
93 79 self._filename = 'dirstate'
94 80 self._pendingfilename = '%s.pending' % self._filename
95 81 self._plchangecallbacks = {}
96 82 self._origpl = None
97 83 self._updatedfiles = set()
98 84
99 85 # for consistent view between _pl() and _read() invocations
100 86 self._pendingmode = None
101 87
102 88 @contextlib.contextmanager
103 89 def parentchange(self):
104 90 '''Context manager for handling dirstate parents.
105 91
106 92 If an exception occurs in the scope of the context manager,
107 93 the incoherent dirstate won't be written when wlock is
108 94 released.
109 95 '''
110 96 self._parentwriters += 1
111 97 yield
112 98 # Typically we want the "undo" step of a context manager in a
113 99 # finally block so it happens even when an exception
114 100 # occurs. In this case, however, we only want to decrement
115 101 # parentwriters if the code in the with statement exits
116 102 # normally, so we don't have a try/finally here on purpose.
117 103 self._parentwriters -= 1
118 104
119 105 def beginparentchange(self):
120 106 '''Marks the beginning of a set of changes that involve changing
121 107 the dirstate parents. If there is an exception during this time,
122 108 the dirstate will not be written when the wlock is released. This
123 109 prevents writing an incoherent dirstate where the parent doesn't
124 110 match the contents.
125 111 '''
126 112 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 113 'parentchange context manager.', '4.3')
128 114 self._parentwriters += 1
129 115
130 116 def endparentchange(self):
131 117 '''Marks the end of a set of changes that involve changing the
132 118 dirstate parents. Once all parent changes have been marked done,
133 119 the wlock will be free to write the dirstate on release.
134 120 '''
135 121 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 122 'parentchange context manager.', '4.3')
137 123 if self._parentwriters > 0:
138 124 self._parentwriters -= 1
139 125
140 126 def pendingparentchange(self):
141 127 '''Returns true if the dirstate is in the middle of a set of changes
142 128 that modify the dirstate parent.
143 129 '''
144 130 return self._parentwriters > 0
145 131
146 132 @propertycache
147 133 def _map(self):
148 134 '''Return the dirstate contents as a map from filename to
149 135 (state, mode, size, time).'''
150 136 self._read()
151 137 return self._map
152 138
153 139 @propertycache
154 140 def _copymap(self):
155 141 self._read()
156 142 return self._copymap
157 143
158 144 @propertycache
159 145 def _identity(self):
160 146 self._read()
161 147 return self._identity
162 148
163 149 @propertycache
164 150 def _nonnormalset(self):
165 nonnorm, otherparents = nonnormalentries(self._map)
151 nonnorm, otherparents = self._map.nonnormalentries()
166 152 self._otherparentset = otherparents
167 153 return nonnorm
168 154
169 155 @propertycache
170 156 def _otherparentset(self):
171 nonnorm, otherparents = nonnormalentries(self._map)
157 nonnorm, otherparents = self._map.nonnormalentries()
172 158 self._nonnormalset = nonnorm
173 159 return otherparents
174 160
175 161 @propertycache
176 162 def _filefoldmap(self):
177 163 try:
178 164 makefilefoldmap = parsers.make_file_foldmap
179 165 except AttributeError:
180 166 pass
181 167 else:
182 168 return makefilefoldmap(self._map._map, util.normcasespec,
183 169 util.normcasefallback)
184 170
185 171 f = {}
186 172 normcase = util.normcase
187 173 for name, s in self._map.iteritems():
188 174 if s[0] != 'r':
189 175 f[normcase(name)] = name
190 176 f['.'] = '.' # prevents useless util.fspath() invocation
191 177 return f
192 178
193 179 @propertycache
194 180 def _dirfoldmap(self):
195 181 f = {}
196 182 normcase = util.normcase
197 183 for name in self._dirs:
198 184 f[normcase(name)] = name
199 185 return f
200 186
201 187 @property
202 188 def _sparsematcher(self):
203 189 """The matcher for the sparse checkout.
204 190
205 191 The working directory may not include every file from a manifest. The
206 192 matcher obtained by this property will match a path if it is to be
207 193 included in the working directory.
208 194 """
209 195 # TODO there is potential to cache this property. For now, the matcher
210 196 # is resolved on every access. (But the called function does use a
211 197 # cache to keep the lookup fast.)
212 198 return self._sparsematchfn()
213 199
214 200 @repocache('branch')
215 201 def _branch(self):
216 202 try:
217 203 return self._opener.read("branch").strip() or "default"
218 204 except IOError as inst:
219 205 if inst.errno != errno.ENOENT:
220 206 raise
221 207 return "default"
222 208
223 209 @propertycache
224 210 def _pl(self):
225 211 try:
226 212 fp = self._opendirstatefile()
227 213 st = fp.read(40)
228 214 fp.close()
229 215 l = len(st)
230 216 if l == 40:
231 217 return st[:20], st[20:40]
232 218 elif l > 0 and l < 40:
233 219 raise error.Abort(_('working directory state appears damaged!'))
234 220 except IOError as err:
235 221 if err.errno != errno.ENOENT:
236 222 raise
237 223 return [nullid, nullid]
238 224
239 225 @propertycache
240 226 def _dirs(self):
241 227 return util.dirs(self._map._map, 'r')
242 228
243 229 def dirs(self):
244 230 return self._dirs
245 231
246 232 @rootcache('.hgignore')
247 233 def _ignore(self):
248 234 files = self._ignorefiles()
249 235 if not files:
250 236 return matchmod.never(self._root, '')
251 237
252 238 pats = ['include:%s' % f for f in files]
253 239 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254 240
255 241 @propertycache
256 242 def _slash(self):
257 243 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258 244
259 245 @propertycache
260 246 def _checklink(self):
261 247 return util.checklink(self._root)
262 248
263 249 @propertycache
264 250 def _checkexec(self):
265 251 return util.checkexec(self._root)
266 252
267 253 @propertycache
268 254 def _checkcase(self):
269 255 return not util.fscasesensitive(self._join('.hg'))
270 256
271 257 def _join(self, f):
272 258 # much faster than os.path.join()
273 259 # it's safe because f is always a relative path
274 260 return self._rootdir + f
275 261
276 262 def flagfunc(self, buildfallback):
277 263 if self._checklink and self._checkexec:
278 264 def f(x):
279 265 try:
280 266 st = os.lstat(self._join(x))
281 267 if util.statislink(st):
282 268 return 'l'
283 269 if util.statisexec(st):
284 270 return 'x'
285 271 except OSError:
286 272 pass
287 273 return ''
288 274 return f
289 275
290 276 fallback = buildfallback()
291 277 if self._checklink:
292 278 def f(x):
293 279 if os.path.islink(self._join(x)):
294 280 return 'l'
295 281 if 'x' in fallback(x):
296 282 return 'x'
297 283 return ''
298 284 return f
299 285 if self._checkexec:
300 286 def f(x):
301 287 if 'l' in fallback(x):
302 288 return 'l'
303 289 if util.isexec(self._join(x)):
304 290 return 'x'
305 291 return ''
306 292 return f
307 293 else:
308 294 return fallback
309 295
310 296 @propertycache
311 297 def _cwd(self):
312 298 # internal config: ui.forcecwd
313 299 forcecwd = self._ui.config('ui', 'forcecwd')
314 300 if forcecwd:
315 301 return forcecwd
316 302 return pycompat.getcwd()
317 303
318 304 def getcwd(self):
319 305 '''Return the path from which a canonical path is calculated.
320 306
321 307 This path should be used to resolve file patterns or to convert
322 308 canonical paths back to file paths for display. It shouldn't be
323 309 used to get real file paths. Use vfs functions instead.
324 310 '''
325 311 cwd = self._cwd
326 312 if cwd == self._root:
327 313 return ''
328 314 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 315 rootsep = self._root
330 316 if not util.endswithsep(rootsep):
331 317 rootsep += pycompat.ossep
332 318 if cwd.startswith(rootsep):
333 319 return cwd[len(rootsep):]
334 320 else:
335 321 # we're outside the repo. return an absolute path.
336 322 return cwd
337 323
338 324 def pathto(self, f, cwd=None):
339 325 if cwd is None:
340 326 cwd = self.getcwd()
341 327 path = util.pathto(self._root, cwd, f)
342 328 if self._slash:
343 329 return util.pconvert(path)
344 330 return path
345 331
346 332 def __getitem__(self, key):
347 333 '''Return the current state of key (a filename) in the dirstate.
348 334
349 335 States are:
350 336 n normal
351 337 m needs merging
352 338 r marked for removal
353 339 a marked for addition
354 340 ? not tracked
355 341 '''
356 342 return self._map.get(key, ("?",))[0]
357 343
358 344 def __contains__(self, key):
359 345 return key in self._map
360 346
361 347 def __iter__(self):
362 348 return iter(sorted(self._map))
363 349
364 350 def items(self):
365 351 return self._map.iteritems()
366 352
367 353 iteritems = items
368 354
369 355 def parents(self):
370 356 return [self._validate(p) for p in self._pl]
371 357
372 358 def p1(self):
373 359 return self._validate(self._pl[0])
374 360
375 361 def p2(self):
376 362 return self._validate(self._pl[1])
377 363
378 364 def branch(self):
379 365 return encoding.tolocal(self._branch)
380 366
381 367 def setparents(self, p1, p2=nullid):
382 368 """Set dirstate parents to p1 and p2.
383 369
384 370 When moving from two parents to one, 'm' merged entries are
385 371 adjusted to normal and previous copy records are discarded and
386 372 returned by the call.
387 373
388 374 See localrepo.setparents()
389 375 """
390 376 if self._parentwriters == 0:
391 377 raise ValueError("cannot set dirstate parent without "
392 378 "calling dirstate.beginparentchange")
393 379
394 380 self._dirty = self._dirtypl = True
395 381 oldp2 = self._pl[1]
396 382 if self._origpl is None:
397 383 self._origpl = self._pl
398 384 self._pl = p1, p2
399 385 copies = {}
400 386 if oldp2 != nullid and p2 == nullid:
401 387 candidatefiles = self._nonnormalset.union(self._otherparentset)
402 388 for f in candidatefiles:
403 389 s = self._map.get(f)
404 390 if s is None:
405 391 continue
406 392
407 393 # Discard 'm' markers when moving away from a merge state
408 394 if s[0] == 'm':
409 395 source = self._copymap.get(f)
410 396 if source:
411 397 copies[f] = source
412 398 self.normallookup(f)
413 399 # Also fix up otherparent markers
414 400 elif s[0] == 'n' and s[2] == -2:
415 401 source = self._copymap.get(f)
416 402 if source:
417 403 copies[f] = source
418 404 self.add(f)
419 405 return copies
420 406
421 407 def setbranch(self, branch):
422 408 self._branch = encoding.fromlocal(branch)
423 409 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
424 410 try:
425 411 f.write(self._branch + '\n')
426 412 f.close()
427 413
428 414 # make sure filecache has the correct stat info for _branch after
429 415 # replacing the underlying file
430 416 ce = self._filecache['_branch']
431 417 if ce:
432 418 ce.refresh()
433 419 except: # re-raises
434 420 f.discard()
435 421 raise
436 422
437 423 def _opendirstatefile(self):
438 424 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
439 425 if self._pendingmode is not None and self._pendingmode != mode:
440 426 fp.close()
441 427 raise error.Abort(_('working directory state may be '
442 428 'changed parallelly'))
443 429 self._pendingmode = mode
444 430 return fp
445 431
446 432 def _read(self):
447 433 self._map = dirstatemap()
448 434
449 435 self._copymap = {}
450 436 # ignore HG_PENDING because identity is used only for writing
451 437 self._identity = util.filestat.frompath(
452 438 self._opener.join(self._filename))
453 439 try:
454 440 fp = self._opendirstatefile()
455 441 try:
456 442 st = fp.read()
457 443 finally:
458 444 fp.close()
459 445 except IOError as err:
460 446 if err.errno != errno.ENOENT:
461 447 raise
462 448 return
463 449 if not st:
464 450 return
465 451
466 452 if util.safehasattr(parsers, 'dict_new_presized'):
467 453 # Make an estimate of the number of files in the dirstate based on
468 454 # its size. From a linear regression on a set of real-world repos,
469 455 # all over 10,000 files, the size of a dirstate entry is 85
470 456 # bytes. The cost of resizing is significantly higher than the cost
471 457 # of filling in a larger presized dict, so subtract 20% from the
472 458 # size.
473 459 #
474 460 # This heuristic is imperfect in many ways, so in a future dirstate
475 461 # format update it makes sense to just record the number of entries
476 462 # on write.
477 463 self._map._map = parsers.dict_new_presized(len(st) / 71)
478 464
479 465 # Python's garbage collector triggers a GC each time a certain number
480 466 # of container objects (the number being defined by
481 467 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
482 468 # for each file in the dirstate. The C version then immediately marks
483 469 # them as not to be tracked by the collector. However, this has no
484 470 # effect on when GCs are triggered, only on what objects the GC looks
485 471 # into. This means that O(number of files) GCs are unavoidable.
486 472 # Depending on when in the process's lifetime the dirstate is parsed,
487 473 # this can get very expensive. As a workaround, disable GC while
488 474 # parsing the dirstate.
489 475 #
490 476 # (we cannot decorate the function directly since it is in a C module)
491 477 parse_dirstate = util.nogc(parsers.parse_dirstate)
492 478 p = parse_dirstate(self._map._map, self._copymap, st)
493 479 if not self._dirtypl:
494 480 self._pl = p
495 481
496 482 def invalidate(self):
497 483 '''Causes the next access to reread the dirstate.
498 484
499 485 This is different from localrepo.invalidatedirstate() because it always
500 486 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
501 487 check whether the dirstate has changed before rereading it.'''
502 488
503 489 for a in ("_map", "_copymap", "_identity",
504 490 "_filefoldmap", "_dirfoldmap", "_branch",
505 491 "_pl", "_dirs", "_ignore", "_nonnormalset",
506 492 "_otherparentset"):
507 493 if a in self.__dict__:
508 494 delattr(self, a)
509 495 self._lastnormaltime = 0
510 496 self._dirty = False
511 497 self._updatedfiles.clear()
512 498 self._parentwriters = 0
513 499 self._origpl = None
514 500
515 501 def copy(self, source, dest):
516 502 """Mark dest as a copy of source. Unmark dest if source is None."""
517 503 if source == dest:
518 504 return
519 505 self._dirty = True
520 506 if source is not None:
521 507 self._copymap[dest] = source
522 508 self._updatedfiles.add(source)
523 509 self._updatedfiles.add(dest)
524 510 elif self._copymap.pop(dest, None):
525 511 self._updatedfiles.add(dest)
526 512
527 513 def copied(self, file):
528 514 return self._copymap.get(file, None)
529 515
530 516 def copies(self):
531 517 return self._copymap
532 518
533 519 def _droppath(self, f):
534 520 if self[f] not in "?r" and "_dirs" in self.__dict__:
535 521 self._dirs.delpath(f)
536 522
537 523 if "_filefoldmap" in self.__dict__:
538 524 normed = util.normcase(f)
539 525 if normed in self._filefoldmap:
540 526 del self._filefoldmap[normed]
541 527
542 528 self._updatedfiles.add(f)
543 529
544 530 def _addpath(self, f, state, mode, size, mtime):
545 531 oldstate = self[f]
546 532 if state == 'a' or oldstate == 'r':
547 533 scmutil.checkfilename(f)
548 534 if f in self._dirs:
549 535 raise error.Abort(_('directory %r already in dirstate') % f)
550 536 # shadows
551 537 for d in util.finddirs(f):
552 538 if d in self._dirs:
553 539 break
554 540 entry = self._map.get(d)
555 541 if entry is not None and entry[0] != 'r':
556 542 raise error.Abort(
557 543 _('file %r in dirstate clashes with %r') % (d, f))
558 544 if oldstate in "?r" and "_dirs" in self.__dict__:
559 545 self._dirs.addpath(f)
560 546 self._dirty = True
561 547 self._updatedfiles.add(f)
562 548 self._map[f] = dirstatetuple(state, mode, size, mtime)
563 549 if state != 'n' or mtime == -1:
564 550 self._nonnormalset.add(f)
565 551 if size == -2:
566 552 self._otherparentset.add(f)
567 553
568 554 def normal(self, f):
569 555 '''Mark a file normal and clean.'''
570 556 s = os.lstat(self._join(f))
571 557 mtime = s.st_mtime
572 558 self._addpath(f, 'n', s.st_mode,
573 559 s.st_size & _rangemask, mtime & _rangemask)
574 560 self._copymap.pop(f, None)
575 561 if f in self._nonnormalset:
576 562 self._nonnormalset.remove(f)
577 563 if mtime > self._lastnormaltime:
578 564 # Remember the most recent modification timeslot for status(),
579 565 # to make sure we won't miss future size-preserving file content
580 566 # modifications that happen within the same timeslot.
581 567 self._lastnormaltime = mtime
582 568
583 569 def normallookup(self, f):
584 570 '''Mark a file normal, but possibly dirty.'''
585 571 if self._pl[1] != nullid:
586 572 # if there is a merge going on and the file was either
587 573 # in state 'm' (-1) or coming from other parent (-2) before
588 574 # being removed, restore that state.
589 575 entry = self._map.get(f)
590 576 if entry is not None:
591 577 if entry[0] == 'r' and entry[2] in (-1, -2):
592 578 source = self._copymap.get(f)
593 579 if entry[2] == -1:
594 580 self.merge(f)
595 581 elif entry[2] == -2:
596 582 self.otherparent(f)
597 583 if source:
598 584 self.copy(source, f)
599 585 return
600 586 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
601 587 return
602 588 self._addpath(f, 'n', 0, -1, -1)
603 589 self._copymap.pop(f, None)
604 590 if f in self._nonnormalset:
605 591 self._nonnormalset.remove(f)
606 592
607 593 def otherparent(self, f):
608 594 '''Mark as coming from the other parent, always dirty.'''
609 595 if self._pl[1] == nullid:
610 596 raise error.Abort(_("setting %r to other parent "
611 597 "only allowed in merges") % f)
612 598 if f in self and self[f] == 'n':
613 599 # merge-like
614 600 self._addpath(f, 'm', 0, -2, -1)
615 601 else:
616 602 # add-like
617 603 self._addpath(f, 'n', 0, -2, -1)
618 604 self._copymap.pop(f, None)
619 605
620 606 def add(self, f):
621 607 '''Mark a file added.'''
622 608 self._addpath(f, 'a', 0, -1, -1)
623 609 self._copymap.pop(f, None)
624 610
625 611 def remove(self, f):
626 612 '''Mark a file removed.'''
627 613 self._dirty = True
628 614 self._droppath(f)
629 615 size = 0
630 616 if self._pl[1] != nullid:
631 617 entry = self._map.get(f)
632 618 if entry is not None:
633 619 # backup the previous state
634 620 if entry[0] == 'm': # merge
635 621 size = -1
636 622 elif entry[0] == 'n' and entry[2] == -2: # other parent
637 623 size = -2
638 624 self._otherparentset.add(f)
639 625 self._map[f] = dirstatetuple('r', 0, size, 0)
640 626 self._nonnormalset.add(f)
641 627 if size == 0:
642 628 self._copymap.pop(f, None)
643 629
644 630 def merge(self, f):
645 631 '''Mark a file merged.'''
646 632 if self._pl[1] == nullid:
647 633 return self.normallookup(f)
648 634 return self.otherparent(f)
649 635
650 636 def drop(self, f):
651 637 '''Drop a file from the dirstate'''
652 638 if f in self._map:
653 639 self._dirty = True
654 640 self._droppath(f)
655 641 del self._map[f]
656 642 if f in self._nonnormalset:
657 643 self._nonnormalset.remove(f)
658 644 self._copymap.pop(f, None)
659 645
660 646 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
661 647 if exists is None:
662 648 exists = os.path.lexists(os.path.join(self._root, path))
663 649 if not exists:
664 650 # Maybe a path component exists
665 651 if not ignoremissing and '/' in path:
666 652 d, f = path.rsplit('/', 1)
667 653 d = self._normalize(d, False, ignoremissing, None)
668 654 folded = d + "/" + f
669 655 else:
670 656 # No path components, preserve original case
671 657 folded = path
672 658 else:
673 659 # recursively normalize leading directory components
674 660 # against dirstate
675 661 if '/' in normed:
676 662 d, f = normed.rsplit('/', 1)
677 663 d = self._normalize(d, False, ignoremissing, True)
678 664 r = self._root + "/" + d
679 665 folded = d + "/" + util.fspath(f, r)
680 666 else:
681 667 folded = util.fspath(normed, self._root)
682 668 storemap[normed] = folded
683 669
684 670 return folded
685 671
686 672 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
687 673 normed = util.normcase(path)
688 674 folded = self._filefoldmap.get(normed, None)
689 675 if folded is None:
690 676 if isknown:
691 677 folded = path
692 678 else:
693 679 folded = self._discoverpath(path, normed, ignoremissing, exists,
694 680 self._filefoldmap)
695 681 return folded
696 682
697 683 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
698 684 normed = util.normcase(path)
699 685 folded = self._filefoldmap.get(normed, None)
700 686 if folded is None:
701 687 folded = self._dirfoldmap.get(normed, None)
702 688 if folded is None:
703 689 if isknown:
704 690 folded = path
705 691 else:
706 692 # store discovered result in dirfoldmap so that future
707 693 # normalizefile calls don't start matching directories
708 694 folded = self._discoverpath(path, normed, ignoremissing, exists,
709 695 self._dirfoldmap)
710 696 return folded
711 697
712 698 def normalize(self, path, isknown=False, ignoremissing=False):
713 699 '''
714 700 normalize the case of a pathname when on a casefolding filesystem
715 701
716 702 isknown specifies whether the filename came from walking the
717 703 disk, to avoid extra filesystem access.
718 704
719 705 If ignoremissing is True, missing paths are returned
720 706 unchanged. Otherwise, we try harder to normalize possibly
721 707 existing path components.
722 708
723 709 The normalized case is determined based on the following precedence:
724 710
725 711 - version of name already stored in the dirstate
726 712 - version of name stored on disk
727 713 - version provided via command arguments
728 714 '''
729 715
730 716 if self._checkcase:
731 717 return self._normalize(path, isknown, ignoremissing)
732 718 return path
733 719
734 720 def clear(self):
735 721 self._map = dirstatemap()
736 722 self._nonnormalset = set()
737 723 self._otherparentset = set()
738 724 if "_dirs" in self.__dict__:
739 725 delattr(self, "_dirs")
740 726 self._copymap = {}
741 727 self._pl = [nullid, nullid]
742 728 self._lastnormaltime = 0
743 729 self._updatedfiles.clear()
744 730 self._dirty = True
745 731
746 732 def rebuild(self, parent, allfiles, changedfiles=None):
747 733 if changedfiles is None:
748 734 # Rebuild entire dirstate
749 735 changedfiles = allfiles
750 736 lastnormaltime = self._lastnormaltime
751 737 self.clear()
752 738 self._lastnormaltime = lastnormaltime
753 739
754 740 if self._origpl is None:
755 741 self._origpl = self._pl
756 742 self._pl = (parent, nullid)
757 743 for f in changedfiles:
758 744 if f in allfiles:
759 745 self.normallookup(f)
760 746 else:
761 747 self.drop(f)
762 748
763 749 self._dirty = True
764 750
765 751 def identity(self):
766 752 '''Return identity of dirstate itself to detect changing in storage
767 753
768 754 If identity of previous dirstate is equal to this, writing
769 755 changes based on the former dirstate out can keep consistency.
770 756 '''
771 757 return self._identity
772 758
773 759 def write(self, tr):
774 760 if not self._dirty:
775 761 return
776 762
777 763 filename = self._filename
778 764 if tr:
779 765 # 'dirstate.write()' is not only for writing in-memory
780 766 # changes out, but also for dropping ambiguous timestamps;
781 767 # delayed writing would re-raise the "ambiguous timestamp issue".
782 768 # See also the wiki page below for detail:
783 769 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
784 770
785 771 # emulate dropping timestamp in 'parsers.pack_dirstate'
786 772 now = _getfsnow(self._opener)
787 773 dmap = self._map
788 774 for f in self._updatedfiles:
789 775 e = dmap.get(f)
790 776 if e is not None and e[0] == 'n' and e[3] == now:
791 777 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
792 778 self._nonnormalset.add(f)
793 779
794 780 # emulate that all 'dirstate.normal' results are written out
795 781 self._lastnormaltime = 0
796 782 self._updatedfiles.clear()
797 783
798 784 # delay writing in-memory changes out
799 785 tr.addfilegenerator('dirstate', (self._filename,),
800 786 self._writedirstate, location='plain')
801 787 return
802 788
803 789 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
804 790 self._writedirstate(st)
805 791
806 792 def addparentchangecallback(self, category, callback):
807 793 """add a callback to be called when the wd parents are changed
808 794
809 795 Callback will be called with the following arguments:
810 796 dirstate, (oldp1, oldp2), (newp1, newp2)
811 797
812 798 Category is a unique identifier to allow overwriting an old callback
813 799 with a newer callback.
814 800 """
815 801 self._plchangecallbacks[category] = callback
816 802
817 803 def _writedirstate(self, st):
818 804 # notify callbacks about parents change
819 805 if self._origpl is not None and self._origpl != self._pl:
820 806 for c, callback in sorted(self._plchangecallbacks.iteritems()):
821 807 callback(self, self._origpl, self._pl)
822 808 self._origpl = None
823 809 # use the modification time of the newly created temporary file as the
824 810 # filesystem's notion of 'now'
825 811 now = util.fstat(st).st_mtime & _rangemask
826 812
827 813 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping the
828 814 # timestamp of each entry in dirstate, because of 'now > mtime'
829 815 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
830 816 if delaywrite > 0:
831 817 # do we have any files to delay for?
832 818 for f, e in self._map.iteritems():
833 819 if e[0] == 'n' and e[3] == now:
834 820 import time # to avoid useless import
835 821 # rather than sleep n seconds, sleep until the next
836 822 # multiple of n seconds
837 823 clock = time.time()
838 824 start = int(clock) - (int(clock) % delaywrite)
839 825 end = start + delaywrite
840 826 time.sleep(end - clock)
841 827 now = end # trust our estimate that the end is near now
842 828 break
843 829
844 830 st.write(parsers.pack_dirstate(self._map._map, self._copymap, self._pl,
845 831 now))
846 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
832 self._nonnormalset, self._otherparentset = self._map.nonnormalentries()
847 833 st.close()
848 834 self._lastnormaltime = 0
849 835 self._dirty = self._dirtypl = False
850 836
851 837 def _dirignore(self, f):
852 838 if f == '.':
853 839 return False
854 840 if self._ignore(f):
855 841 return True
856 842 for p in util.finddirs(f):
857 843 if self._ignore(p):
858 844 return True
859 845 return False
860 846
861 847 def _ignorefiles(self):
862 848 files = []
863 849 if os.path.exists(self._join('.hgignore')):
864 850 files.append(self._join('.hgignore'))
865 851 for name, path in self._ui.configitems("ui"):
866 852 if name == 'ignore' or name.startswith('ignore.'):
867 853 # we need to use os.path.join here rather than self._join
868 854 # because path is arbitrary and user-specified
869 855 files.append(os.path.join(self._rootdir, util.expandpath(path)))
870 856 return files
871 857
872 858 def _ignorefileandline(self, f):
873 859 files = collections.deque(self._ignorefiles())
874 860 visited = set()
875 861 while files:
876 862 i = files.popleft()
877 863 patterns = matchmod.readpatternfile(i, self._ui.warn,
878 864 sourceinfo=True)
879 865 for pattern, lineno, line in patterns:
880 866 kind, p = matchmod._patsplit(pattern, 'glob')
881 867 if kind == "subinclude":
882 868 if p not in visited:
883 869 files.append(p)
884 870 continue
885 871 m = matchmod.match(self._root, '', [], [pattern],
886 872 warn=self._ui.warn)
887 873 if m(f):
888 874 return (i, lineno, line)
889 875 visited.add(i)
890 876 return (None, -1, "")
891 877
892 878 def _walkexplicit(self, match, subrepos):
893 879 '''Get stat data about the files explicitly specified by match.
894 880
895 881 Return a triple (results, dirsfound, dirsnotfound).
896 882 - results is a mapping from filename to stat result. It also contains
897 883 listings mapping subrepos and .hg to None.
898 884 - dirsfound is a list of files found to be directories.
899 885 - dirsnotfound is a list of files that the dirstate thinks are
900 886 directories and that were not found.'''
901 887
902 888 def badtype(mode):
903 889 kind = _('unknown')
904 890 if stat.S_ISCHR(mode):
905 891 kind = _('character device')
906 892 elif stat.S_ISBLK(mode):
907 893 kind = _('block device')
908 894 elif stat.S_ISFIFO(mode):
909 895 kind = _('fifo')
910 896 elif stat.S_ISSOCK(mode):
911 897 kind = _('socket')
912 898 elif stat.S_ISDIR(mode):
913 899 kind = _('directory')
914 900 return _('unsupported file type (type is %s)') % kind
915 901
916 902 matchedir = match.explicitdir
917 903 badfn = match.bad
918 904 dmap = self._map
919 905 lstat = os.lstat
920 906 getkind = stat.S_IFMT
921 907 dirkind = stat.S_IFDIR
922 908 regkind = stat.S_IFREG
923 909 lnkkind = stat.S_IFLNK
924 910 join = self._join
925 911 dirsfound = []
926 912 foundadd = dirsfound.append
927 913 dirsnotfound = []
928 914 notfoundadd = dirsnotfound.append
929 915
930 916 if not match.isexact() and self._checkcase:
931 917 normalize = self._normalize
932 918 else:
933 919 normalize = None
934 920
935 921 files = sorted(match.files())
936 922 subrepos.sort()
937 923 i, j = 0, 0
938 924 while i < len(files) and j < len(subrepos):
939 925 subpath = subrepos[j] + "/"
940 926 if files[i] < subpath:
941 927 i += 1
942 928 continue
943 929 while i < len(files) and files[i].startswith(subpath):
944 930 del files[i]
945 931 j += 1
946 932
947 933 if not files or '.' in files:
948 934 files = ['.']
949 935 results = dict.fromkeys(subrepos)
950 936 results['.hg'] = None
951 937
952 938 alldirs = None
953 939 for ff in files:
954 940 # constructing the foldmap is expensive, so don't do it for the
955 941 # common case where files is ['.']
956 942 if normalize and ff != '.':
957 943 nf = normalize(ff, False, True)
958 944 else:
959 945 nf = ff
960 946 if nf in results:
961 947 continue
962 948
963 949 try:
964 950 st = lstat(join(nf))
965 951 kind = getkind(st.st_mode)
966 952 if kind == dirkind:
967 953 if nf in dmap:
968 954 # file replaced by dir on disk but still in dirstate
969 955 results[nf] = None
970 956 if matchedir:
971 957 matchedir(nf)
972 958 foundadd((nf, ff))
973 959 elif kind == regkind or kind == lnkkind:
974 960 results[nf] = st
975 961 else:
976 962 badfn(ff, badtype(kind))
977 963 if nf in dmap:
978 964 results[nf] = None
979 965 except OSError as inst: # nf not found on disk - it is dirstate only
980 966 if nf in dmap: # does it exactly match a missing file?
981 967 results[nf] = None
982 968 else: # does it match a missing directory?
983 969 if alldirs is None:
984 970 alldirs = util.dirs(dmap._map)
985 971 if nf in alldirs:
986 972 if matchedir:
987 973 matchedir(nf)
988 974 notfoundadd(nf)
989 975 else:
990 976 badfn(ff, encoding.strtolocal(inst.strerror))
991 977
992 978 # Case insensitive filesystems cannot rely on lstat() failing to detect
993 979 # a case-only rename. Prune the stat object for any file that does not
994 980 # match the case in the filesystem, if there are multiple files that
995 981 # normalize to the same path.
996 982 if match.isexact() and self._checkcase:
997 983 normed = {}
998 984
999 985 for f, st in results.iteritems():
1000 986 if st is None:
1001 987 continue
1002 988
1003 989 nc = util.normcase(f)
1004 990 paths = normed.get(nc)
1005 991
1006 992 if paths is None:
1007 993 paths = set()
1008 994 normed[nc] = paths
1009 995
1010 996 paths.add(f)
1011 997
1012 998 for norm, paths in normed.iteritems():
1013 999 if len(paths) > 1:
1014 1000 for path in paths:
1015 1001 folded = self._discoverpath(path, norm, True, None,
1016 1002 self._dirfoldmap)
1017 1003 if path != folded:
1018 1004 results[path] = None
1019 1005
1020 1006 return results, dirsfound, dirsnotfound
1021 1007
1022 1008 def walk(self, match, subrepos, unknown, ignored, full=True):
1023 1009 '''
1024 1010 Walk recursively through the directory tree, finding all files
1025 1011 matched by match.
1026 1012
1027 1013 If full is False, maybe skip some known-clean files.
1028 1014
1029 1015 Return a dict mapping filename to stat-like object (either
1030 1016 mercurial.osutil.stat instance or return value of os.stat()).
1031 1017
1032 1018 '''
1033 1019 # full is a flag that extensions that hook into walk can use -- this
1034 1020 # implementation doesn't use it at all. This satisfies the contract
1035 1021 # because we only guarantee a "maybe".
1036 1022
1037 1023 if ignored:
1038 1024 ignore = util.never
1039 1025 dirignore = util.never
1040 1026 elif unknown:
1041 1027 ignore = self._ignore
1042 1028 dirignore = self._dirignore
1043 1029 else:
1044 1030 # if not unknown and not ignored, drop dir recursion and step 2
1045 1031 ignore = util.always
1046 1032 dirignore = util.always
1047 1033
1048 1034 matchfn = match.matchfn
1049 1035 matchalways = match.always()
1050 1036 matchtdir = match.traversedir
1051 1037 dmap = self._map
1052 1038 listdir = util.listdir
1053 1039 lstat = os.lstat
1054 1040 dirkind = stat.S_IFDIR
1055 1041 regkind = stat.S_IFREG
1056 1042 lnkkind = stat.S_IFLNK
1057 1043 join = self._join
1058 1044
1059 1045 exact = skipstep3 = False
1060 1046 if match.isexact(): # match.exact
1061 1047 exact = True
1062 1048 dirignore = util.always # skip step 2
1063 1049 elif match.prefix(): # match.match, no patterns
1064 1050 skipstep3 = True
1065 1051
1066 1052 if not exact and self._checkcase:
1067 1053 normalize = self._normalize
1068 1054 normalizefile = self._normalizefile
1069 1055 skipstep3 = False
1070 1056 else:
1071 1057 normalize = self._normalize
1072 1058 normalizefile = None
1073 1059
1074 1060 # step 1: find all explicit files
1075 1061 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1076 1062
1077 1063 skipstep3 = skipstep3 and not (work or dirsnotfound)
1078 1064 work = [d for d in work if not dirignore(d[0])]
1079 1065
1080 1066 # step 2: visit subdirectories
1081 1067 def traverse(work, alreadynormed):
1082 1068 wadd = work.append
1083 1069 while work:
1084 1070 nd = work.pop()
1085 1071 if not match.visitdir(nd):
1086 1072 continue
1087 1073 skip = None
1088 1074 if nd == '.':
1089 1075 nd = ''
1090 1076 else:
1091 1077 skip = '.hg'
1092 1078 try:
1093 1079 entries = listdir(join(nd), stat=True, skip=skip)
1094 1080 except OSError as inst:
1095 1081 if inst.errno in (errno.EACCES, errno.ENOENT):
1096 1082 match.bad(self.pathto(nd),
1097 1083 encoding.strtolocal(inst.strerror))
1098 1084 continue
1099 1085 raise
1100 1086 for f, kind, st in entries:
1101 1087 if normalizefile:
1102 1088 # even though f might be a directory, we're only
1103 1089 # interested in comparing it to files currently in the
1104 1090 # dmap -- therefore normalizefile is enough
1105 1091 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1106 1092 True)
1107 1093 else:
1108 1094 nf = nd and (nd + "/" + f) or f
1109 1095 if nf not in results:
1110 1096 if kind == dirkind:
1111 1097 if not ignore(nf):
1112 1098 if matchtdir:
1113 1099 matchtdir(nf)
1114 1100 wadd(nf)
1115 1101 if nf in dmap and (matchalways or matchfn(nf)):
1116 1102 results[nf] = None
1117 1103 elif kind == regkind or kind == lnkkind:
1118 1104 if nf in dmap:
1119 1105 if matchalways or matchfn(nf):
1120 1106 results[nf] = st
1121 1107 elif ((matchalways or matchfn(nf))
1122 1108 and not ignore(nf)):
1123 1109 # unknown file -- normalize if necessary
1124 1110 if not alreadynormed:
1125 1111 nf = normalize(nf, False, True)
1126 1112 results[nf] = st
1127 1113 elif nf in dmap and (matchalways or matchfn(nf)):
1128 1114 results[nf] = None
1129 1115
1130 1116 for nd, d in work:
1131 1117 # alreadynormed means that processwork doesn't have to do any
1132 1118 # expensive directory normalization
1133 1119 alreadynormed = not normalize or nd == d
1134 1120 traverse([d], alreadynormed)
1135 1121
1136 1122 for s in subrepos:
1137 1123 del results[s]
1138 1124 del results['.hg']
1139 1125
1140 1126 # step 3: visit remaining files from dmap
1141 1127 if not skipstep3 and not exact:
1142 1128 # If a dmap file is not in results yet, it was either
1143 1129 # a) not matching matchfn b) ignored, c) missing, or d) under a
1144 1130 # symlink directory.
1145 1131 if not results and matchalways:
1146 1132 visit = [f for f in dmap]
1147 1133 else:
1148 1134 visit = [f for f in dmap if f not in results and matchfn(f)]
1149 1135 visit.sort()
1150 1136
1151 1137 if unknown:
1152 1138 # unknown == True means we walked all dirs under the roots
1153 1139 # that weren't ignored, and everything that matched was stat'ed
1154 1140 # and is already in results.
1155 1141 # The rest must thus be ignored or under a symlink.
1156 1142 audit_path = pathutil.pathauditor(self._root, cached=True)
1157 1143
1158 1144 for nf in iter(visit):
1159 1145 # If a stat for the same file was already added with a
1160 1146 # different case, don't add one for this, since that would
1161 1147 # make it appear as if the file exists under both names
1162 1148 # on disk.
1163 1149 if (normalizefile and
1164 1150 normalizefile(nf, True, True) in results):
1165 1151 results[nf] = None
1166 1152 # Report ignored items in the dmap as long as they are not
1167 1153 # under a symlink directory.
1168 1154 elif audit_path.check(nf):
1169 1155 try:
1170 1156 results[nf] = lstat(join(nf))
1171 1157 # file was just ignored, no links, and exists
1172 1158 except OSError:
1173 1159 # file doesn't exist
1174 1160 results[nf] = None
1175 1161 else:
1176 1162 # It's either missing or under a symlink directory
1177 1163 # which we in this case report as missing
1178 1164 results[nf] = None
1179 1165 else:
1180 1166 # We may not have walked the full directory tree above,
1181 1167 # so stat and check everything we missed.
1182 1168 iv = iter(visit)
1183 1169 for st in util.statfiles([join(i) for i in visit]):
1184 1170 results[next(iv)] = st
1185 1171 return results
1186 1172
1187 1173 def status(self, match, subrepos, ignored, clean, unknown):
1188 1174 '''Determine the status of the working copy relative to the
1189 1175 dirstate and return a pair of (unsure, status), where status is of type
1190 1176 scmutil.status and:
1191 1177
1192 1178 unsure:
1193 1179 files that might have been modified since the dirstate was
1194 1180 written, but need to be read to be sure (size is the same
1195 1181 but mtime differs)
1196 1182 status.modified:
1197 1183 files that have definitely been modified since the dirstate
1198 1184 was written (different size or mode)
1199 1185 status.clean:
1200 1186 files that have definitely not been modified since the
1201 1187 dirstate was written
1202 1188 '''
1203 1189 listignored, listclean, listunknown = ignored, clean, unknown
1204 1190 lookup, modified, added, unknown, ignored = [], [], [], [], []
1205 1191 removed, deleted, clean = [], [], []
1206 1192
1207 1193 dmap = self._map
1208 1194 ladd = lookup.append # aka "unsure"
1209 1195 madd = modified.append
1210 1196 aadd = added.append
1211 1197 uadd = unknown.append
1212 1198 iadd = ignored.append
1213 1199 radd = removed.append
1214 1200 dadd = deleted.append
1215 1201 cadd = clean.append
1216 1202 mexact = match.exact
1217 1203 dirignore = self._dirignore
1218 1204 checkexec = self._checkexec
1219 1205 copymap = self._copymap
1220 1206 lastnormaltime = self._lastnormaltime
1221 1207
1222 1208 # We need to do full walks when either
1223 1209 # - we're listing all clean files, or
1224 1210 # - match.traversedir does something, because match.traversedir should
1225 1211 # be called for every dir in the working dir
1226 1212 full = listclean or match.traversedir is not None
1227 1213 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1228 1214 full=full).iteritems():
1229 1215 if fn not in dmap:
1230 1216 if (listignored or mexact(fn)) and dirignore(fn):
1231 1217 if listignored:
1232 1218 iadd(fn)
1233 1219 else:
1234 1220 uadd(fn)
1235 1221 continue
1236 1222
1237 1223 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1238 1224 # written like that for performance reasons. dmap[fn] is not a
1239 1225 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1240 1226 # opcode has fast paths when the value to be unpacked is a tuple or
1241 1227 # a list, but falls back to creating a full-fledged iterator in
1242 1228 # general. That is much slower than simply accessing and storing the
1243 1229 # tuple members one by one.
1244 1230 t = dmap[fn]
1245 1231 state = t[0]
1246 1232 mode = t[1]
1247 1233 size = t[2]
1248 1234 time = t[3]
1249 1235
1250 1236 if not st and state in "nma":
1251 1237 dadd(fn)
1252 1238 elif state == 'n':
1253 1239 if (size >= 0 and
1254 1240 ((size != st.st_size and size != st.st_size & _rangemask)
1255 1241 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1256 1242 or size == -2 # other parent
1257 1243 or fn in copymap):
1258 1244 madd(fn)
1259 1245 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1260 1246 ladd(fn)
1261 1247 elif st.st_mtime == lastnormaltime:
1262 1248 # fn may have just been marked as normal and it may have
1263 1249 # changed in the same second without changing its size.
1264 1250 # This can happen if we quickly do multiple commits.
1265 1251 # Force lookup, so we don't miss such a racy file change.
1266 1252 ladd(fn)
1267 1253 elif listclean:
1268 1254 cadd(fn)
1269 1255 elif state == 'm':
1270 1256 madd(fn)
1271 1257 elif state == 'a':
1272 1258 aadd(fn)
1273 1259 elif state == 'r':
1274 1260 radd(fn)
1275 1261
1276 1262 return (lookup, scmutil.status(modified, added, removed, deleted,
1277 1263 unknown, ignored, clean))
1278 1264
1279 1265 def matches(self, match):
1280 1266 '''
1281 1267 return files in the dirstate (in whatever state) filtered by match
1282 1268 '''
1283 1269 dmap = self._map
1284 1270 if match.always():
1285 1271 return dmap.keys()
1286 1272 files = match.files()
1287 1273 if match.isexact():
1288 1274 # fast path -- filter the other way around, since typically files is
1289 1275 # much smaller than dmap
1290 1276 return [f for f in files if f in dmap]
1291 1277 if match.prefix() and all(fn in dmap for fn in files):
1292 1278 # fast path -- all the values are known to be files, so just return
1293 1279 # that
1294 1280 return list(files)
1295 1281 return [f for f in dmap if match(f)]
1296 1282
1297 1283 def _actualfilename(self, tr):
1298 1284 if tr:
1299 1285 return self._pendingfilename
1300 1286 else:
1301 1287 return self._filename
1302 1288
1303 1289 def savebackup(self, tr, backupname):
1304 1290 '''Save current dirstate into backup file'''
1305 1291 filename = self._actualfilename(tr)
1306 1292 assert backupname != filename
1307 1293
1308 1294 # use '_writedirstate' instead of 'write' to write changes certainly,
1309 1295 # because the latter omits writing out if transaction is running.
1310 1296 # output file will be used to create backup of dirstate at this point.
1311 1297 if self._dirty or not self._opener.exists(filename):
1312 1298 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1313 1299 checkambig=True))
1314 1300
1315 1301 if tr:
1316 1302 # ensure that subsequent tr.writepending returns True for
1317 1303 # changes written out above, even if dirstate is never
1318 1304 # changed after this
1319 1305 tr.addfilegenerator('dirstate', (self._filename,),
1320 1306 self._writedirstate, location='plain')
1321 1307
1322 1308 # ensure that pending file written above is unlinked at
1323 1309 # failure, even if tr.writepending isn't invoked until the
1324 1310 # end of this transaction
1325 1311 tr.registertmp(filename, location='plain')
1326 1312
1327 1313 self._opener.tryunlink(backupname)
1328 1314 # hardlink backup is okay because _writedirstate is always called
1329 1315 # with an "atomictemp=True" file.
1330 1316 util.copyfile(self._opener.join(filename),
1331 1317 self._opener.join(backupname), hardlink=True)
1332 1318
1333 1319 def restorebackup(self, tr, backupname):
1334 1320 '''Restore dirstate by backup file'''
1335 1321 # this "invalidate()" prevents "wlock.release()" from writing
1336 1322 # changes of dirstate out after restoring from backup file
1337 1323 self.invalidate()
1338 1324 filename = self._actualfilename(tr)
1339 1325 self._opener.rename(backupname, filename, checkambig=True)
1340 1326
1341 1327 def clearbackup(self, tr, backupname):
1342 1328 '''Clear backup file'''
1343 1329 self._opener.unlink(backupname)
1344 1330
1345 1331 class dirstatemap(object):
1346 1332 def __init__(self):
1347 1333 self._map = {}
1348 1334
1349 1335 def iteritems(self):
1350 1336 return self._map.iteritems()
1351 1337
1352 1338 def __iter__(self):
1353 1339 return iter(self._map)
1354 1340
1355 1341 def get(self, key, default=None):
1356 1342 return self._map.get(key, default)
1357 1343
1358 1344 def __contains__(self, key):
1359 1345 return key in self._map
1360 1346
1361 1347 def __setitem__(self, key, value):
1362 1348 self._map[key] = value
1363 1349
1364 1350 def __getitem__(self, key):
1365 1351 return self._map[key]
1366 1352
1367 1353 def __delitem__(self, key):
1368 1354 del self._map[key]
1369 1355
1370 1356 def keys(self):
1371 1357 return self._map.keys()
1358
1359 def nonnormalentries(self):
1360 '''Compute the nonnormal dirstate entries from the dmap'''
1361 try:
1362 return parsers.nonnormalotherparententries(self._map)
1363 except AttributeError:
1364 nonnorm = set()
1365 otherparent = set()
1366 for fname, e in self._map.iteritems():
1367 if e[0] != 'n' or e[3] == -1:
1368 nonnorm.add(fname)
1369 if e[0] == 'n' and e[2] == -2:
1370 otherparent.add(fname)
1371 return nonnorm, otherparent
1372
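The pure-Python fallback above (used when the C parsers module does not provide nonnormalotherparententries) classifies each (state, mode, size, mtime) tuple: an entry whose state is not 'n' or whose mtime is -1 is "nonnormal", and an 'n' entry with size -2 came from the other merge parent. A tiny self-contained illustration of that classification, using hypothetical filenames and plain tuples in place of dirstatetuple:

    # toy dirstate map: filename -> (state, mode, size, mtime)
    dmap = {
        'clean.txt':  ('n', 0o644, 12, 1500000000),  # normal with real mtime: in neither set
        'added.txt':  ('a', 0, -1, -1),              # state != 'n': nonnormal
        'lookup.txt': ('n', 0o644, 12, -1),          # mtime == -1: nonnormal
        'otherp.txt': ('n', 0, -2, -1),              # size == -2: nonnormal and otherparent
    }
    nonnorm = set(f for f, e in dmap.items() if e[0] != 'n' or e[3] == -1)
    otherparent = set(f for f, e in dmap.items() if e[0] == 'n' and e[2] == -2)
    assert nonnorm == {'added.txt', 'lookup.txt', 'otherp.txt'}
    assert otherparent == {'otherp.txt'}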