dirstate: create new dirstatemap class...
Durham Goode
r34333:b36881c6 default
@@ -1,1341 +1,1371 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 def nonnormalentries(dmap):
58 58 '''Compute the nonnormal dirstate entries from the dmap'''
59 59 try:
60 return parsers.nonnormalotherparententries(dmap)
60 return parsers.nonnormalotherparententries(dmap._map)
61 61 except AttributeError:
62 62 nonnorm = set()
63 63 otherparent = set()
64 64 for fname, e in dmap.iteritems():
65 65 if e[0] != 'n' or e[3] == -1:
66 66 nonnorm.add(fname)
67 67 if e[0] == 'n' and e[2] == -2:
68 68 otherparent.add(fname)
69 69 return nonnorm, otherparent
70 70
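# Illustrative sketch (hypothetical helper, not part of this change): the
# pure-Python fallback above classifies entries the same way the C helper
# does -- anything that is not a clean 'n' entry with a real mtime is
# "nonnormal", and 'n' entries recorded with size -2 came from the other
# merge parent. A plain dict exercises the fallback path because it has
# no ._map attribute.
def _example_nonnormal():
    dmap = {'clean.txt': ('n', 0o644, 10, 1500000000),
            'added.txt': ('a', 0, -1, -1),
            'other.txt': ('n', 0, -2, -1)}
    nonnorm, otherparent = nonnormalentries(dmap)
    # nonnorm == {'added.txt', 'other.txt'}, otherparent == {'other.txt'}
    return nonnorm, otherparent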
71 71 class dirstate(object):
72 72
73 73 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 74 '''Create a new dirstate object.
75 75
76 76 opener is an open()-like callable that can be used to open the
77 77 dirstate file; root is the root of the directory tracked by
78 78 the dirstate.
79 79 '''
80 80 self._opener = opener
81 81 self._validate = validate
82 82 self._root = root
83 83 self._sparsematchfn = sparsematchfn
84 84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 85 # UNC path pointing to root share (issue4557)
86 86 self._rootdir = pathutil.normasprefix(root)
87 87 self._dirty = False
88 88 self._dirtypl = False
89 89 self._lastnormaltime = 0
90 90 self._ui = ui
91 91 self._filecache = {}
92 92 self._parentwriters = 0
93 93 self._filename = 'dirstate'
94 94 self._pendingfilename = '%s.pending' % self._filename
95 95 self._plchangecallbacks = {}
96 96 self._origpl = None
97 97 self._updatedfiles = set()
98 98
99 99 # for consistent view between _pl() and _read() invocations
100 100 self._pendingmode = None
101 101
102 102 @contextlib.contextmanager
103 103 def parentchange(self):
104 104 '''Context manager for handling dirstate parents.
105 105
106 106 If an exception occurs in the scope of the context manager,
107 107 the incoherent dirstate won't be written when wlock is
108 108 released.
109 109 '''
110 110 self._parentwriters += 1
111 111 yield
112 112 # Typically we want the "undo" step of a context manager in a
113 113 # finally block so it happens even when an exception
114 114 # occurs. In this case, however, we only want to decrement
115 115 # parentwriters if the code in the with statement exits
116 116 # normally, so we don't have a try/finally here on purpose.
117 117 self._parentwriters -= 1
118 118
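# Illustrative sketch (hypothetical helper): setparents() below refuses to
# run unless a parent change is in progress, so callers wrap it in this
# context manager; the returned copy records are only produced when moving
# from two parents back to one.
def _example_setparents(ds, p1, p2):
    with ds.parentchange():
        copies = ds.setparents(p1, p2)
    return copies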
119 119 def beginparentchange(self):
120 120 '''Marks the beginning of a set of changes that involve changing
121 121 the dirstate parents. If there is an exception during this time,
122 122 the dirstate will not be written when the wlock is released. This
123 123 prevents writing an incoherent dirstate where the parent doesn't
124 124 match the contents.
125 125 '''
126 126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 127 'parentchange context manager.', '4.3')
128 128 self._parentwriters += 1
129 129
130 130 def endparentchange(self):
131 131 '''Marks the end of a set of changes that involve changing the
132 132 dirstate parents. Once all parent changes have been marked done,
133 133 the wlock will be free to write the dirstate on release.
134 134 '''
135 135 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 136 'parentchange context manager.', '4.3')
137 137 if self._parentwriters > 0:
138 138 self._parentwriters -= 1
139 139
140 140 def pendingparentchange(self):
141 141 '''Returns true if the dirstate is in the middle of a set of changes
142 142 that modify the dirstate parent.
143 143 '''
144 144 return self._parentwriters > 0
145 145
146 146 @propertycache
147 147 def _map(self):
148 148 '''Return the dirstate contents as a map from filename to
149 149 (state, mode, size, time).'''
150 150 self._read()
151 151 return self._map
152 152
153 153 @propertycache
154 154 def _copymap(self):
155 155 self._read()
156 156 return self._copymap
157 157
158 158 @propertycache
159 159 def _identity(self):
160 160 self._read()
161 161 return self._identity
162 162
163 163 @propertycache
164 164 def _nonnormalset(self):
165 165 nonnorm, otherparents = nonnormalentries(self._map)
166 166 self._otherparentset = otherparents
167 167 return nonnorm
168 168
169 169 @propertycache
170 170 def _otherparentset(self):
171 171 nonnorm, otherparents = nonnormalentries(self._map)
172 172 self._nonnormalset = nonnorm
173 173 return otherparents
174 174
175 175 @propertycache
176 176 def _filefoldmap(self):
177 177 try:
178 178 makefilefoldmap = parsers.make_file_foldmap
179 179 except AttributeError:
180 180 pass
181 181 else:
182 return makefilefoldmap(self._map, util.normcasespec,
182 return makefilefoldmap(self._map._map, util.normcasespec,
183 183 util.normcasefallback)
184 184
185 185 f = {}
186 186 normcase = util.normcase
187 187 for name, s in self._map.iteritems():
188 188 if s[0] != 'r':
189 189 f[normcase(name)] = name
190 190 f['.'] = '.' # prevents useless util.fspath() invocation
191 191 return f
192 192
193 193 @propertycache
194 194 def _dirfoldmap(self):
195 195 f = {}
196 196 normcase = util.normcase
197 197 for name in self._dirs:
198 198 f[normcase(name)] = name
199 199 return f
200 200
201 201 @property
202 202 def _sparsematcher(self):
203 203 """The matcher for the sparse checkout.
204 204
205 205 The working directory may not include every file from a manifest. The
206 206 matcher obtained by this property will match a path if it is to be
207 207 included in the working directory.
208 208 """
209 209 # TODO there is potential to cache this property. For now, the matcher
210 210 # is resolved on every access. (But the called function does use a
211 211 # cache to keep the lookup fast.)
212 212 return self._sparsematchfn()
213 213
214 214 @repocache('branch')
215 215 def _branch(self):
216 216 try:
217 217 return self._opener.read("branch").strip() or "default"
218 218 except IOError as inst:
219 219 if inst.errno != errno.ENOENT:
220 220 raise
221 221 return "default"
222 222
223 223 @propertycache
224 224 def _pl(self):
225 225 try:
226 226 fp = self._opendirstatefile()
227 227 st = fp.read(40)
228 228 fp.close()
229 229 l = len(st)
230 230 if l == 40:
231 231 return st[:20], st[20:40]
232 232 elif l > 0 and l < 40:
233 233 raise error.Abort(_('working directory state appears damaged!'))
234 234 except IOError as err:
235 235 if err.errno != errno.ENOENT:
236 236 raise
237 237 return [nullid, nullid]
238 238
239 239 @propertycache
240 240 def _dirs(self):
241 return util.dirs(self._map, 'r')
241 return util.dirs(self._map._map, 'r')
242 242
243 243 def dirs(self):
244 244 return self._dirs
245 245
246 246 @rootcache('.hgignore')
247 247 def _ignore(self):
248 248 files = self._ignorefiles()
249 249 if not files:
250 250 return matchmod.never(self._root, '')
251 251
252 252 pats = ['include:%s' % f for f in files]
253 253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254 254
255 255 @propertycache
256 256 def _slash(self):
257 257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258 258
259 259 @propertycache
260 260 def _checklink(self):
261 261 return util.checklink(self._root)
262 262
263 263 @propertycache
264 264 def _checkexec(self):
265 265 return util.checkexec(self._root)
266 266
267 267 @propertycache
268 268 def _checkcase(self):
269 269 return not util.fscasesensitive(self._join('.hg'))
270 270
271 271 def _join(self, f):
272 272 # much faster than os.path.join()
273 273 # it's safe because f is always a relative path
274 274 return self._rootdir + f
275 275
276 276 def flagfunc(self, buildfallback):
277 277 if self._checklink and self._checkexec:
278 278 def f(x):
279 279 try:
280 280 st = os.lstat(self._join(x))
281 281 if util.statislink(st):
282 282 return 'l'
283 283 if util.statisexec(st):
284 284 return 'x'
285 285 except OSError:
286 286 pass
287 287 return ''
288 288 return f
289 289
290 290 fallback = buildfallback()
291 291 if self._checklink:
292 292 def f(x):
293 293 if os.path.islink(self._join(x)):
294 294 return 'l'
295 295 if 'x' in fallback(x):
296 296 return 'x'
297 297 return ''
298 298 return f
299 299 if self._checkexec:
300 300 def f(x):
301 301 if 'l' in fallback(x):
302 302 return 'l'
303 303 if util.isexec(self._join(x)):
304 304 return 'x'
305 305 return ''
306 306 return f
307 307 else:
308 308 return fallback
309 309
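# Illustrative sketch (hypothetical helper): flagfunc() returns a callable
# mapping a working-directory path to '', 'l' (symlink) or 'x' (executable);
# buildfallback is only consulted when the filesystem cannot represent one
# of those bits and manifest data has to fill the gap.
def _example_flags(ds, path):
    getflags = ds.flagfunc(lambda: (lambda f: ''))
    return getflags(path)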
310 310 @propertycache
311 311 def _cwd(self):
312 312 # internal config: ui.forcecwd
313 313 forcecwd = self._ui.config('ui', 'forcecwd')
314 314 if forcecwd:
315 315 return forcecwd
316 316 return pycompat.getcwd()
317 317
318 318 def getcwd(self):
319 319 '''Return the path from which a canonical path is calculated.
320 320
321 321 This path should be used to resolve file patterns or to convert
322 322 canonical paths back to file paths for display. It shouldn't be
323 323 used to get real file paths. Use vfs functions instead.
324 324 '''
325 325 cwd = self._cwd
326 326 if cwd == self._root:
327 327 return ''
328 328 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 329 rootsep = self._root
330 330 if not util.endswithsep(rootsep):
331 331 rootsep += pycompat.ossep
332 332 if cwd.startswith(rootsep):
333 333 return cwd[len(rootsep):]
334 334 else:
335 335 # we're outside the repo. return an absolute path.
336 336 return cwd
337 337
338 338 def pathto(self, f, cwd=None):
339 339 if cwd is None:
340 340 cwd = self.getcwd()
341 341 path = util.pathto(self._root, cwd, f)
342 342 if self._slash:
343 343 return util.pconvert(path)
344 344 return path
345 345
346 346 def __getitem__(self, key):
347 347 '''Return the current state of key (a filename) in the dirstate.
348 348
349 349 States are:
350 350 n normal
351 351 m needs merging
352 352 r marked for removal
353 353 a marked for addition
354 354 ? not tracked
355 355 '''
356 356 return self._map.get(key, ("?",))[0]
357 357
358 358 def __contains__(self, key):
359 359 return key in self._map
360 360
361 361 def __iter__(self):
362 362 return iter(sorted(self._map))
363 363
364 364 def items(self):
365 365 return self._map.iteritems()
366 366
367 367 iteritems = items
368 368
369 369 def parents(self):
370 370 return [self._validate(p) for p in self._pl]
371 371
372 372 def p1(self):
373 373 return self._validate(self._pl[0])
374 374
375 375 def p2(self):
376 376 return self._validate(self._pl[1])
377 377
378 378 def branch(self):
379 379 return encoding.tolocal(self._branch)
380 380
381 381 def setparents(self, p1, p2=nullid):
382 382 """Set dirstate parents to p1 and p2.
383 383
384 384 When moving from two parents to one, 'm' merged entries are
385 385 adjusted to normal and previous copy records discarded and
386 386 returned by the call.
387 387
388 388 See localrepo.setparents()
389 389 """
390 390 if self._parentwriters == 0:
391 391 raise ValueError("cannot set dirstate parent without "
392 392 "calling dirstate.beginparentchange")
393 393
394 394 self._dirty = self._dirtypl = True
395 395 oldp2 = self._pl[1]
396 396 if self._origpl is None:
397 397 self._origpl = self._pl
398 398 self._pl = p1, p2
399 399 copies = {}
400 400 if oldp2 != nullid and p2 == nullid:
401 401 candidatefiles = self._nonnormalset.union(self._otherparentset)
402 402 for f in candidatefiles:
403 403 s = self._map.get(f)
404 404 if s is None:
405 405 continue
406 406
407 407 # Discard 'm' markers when moving away from a merge state
408 408 if s[0] == 'm':
409 409 source = self._copymap.get(f)
410 410 if source:
411 411 copies[f] = source
412 412 self.normallookup(f)
413 413 # Also fix up otherparent markers
414 414 elif s[0] == 'n' and s[2] == -2:
415 415 source = self._copymap.get(f)
416 416 if source:
417 417 copies[f] = source
418 418 self.add(f)
419 419 return copies
420 420
421 421 def setbranch(self, branch):
422 422 self._branch = encoding.fromlocal(branch)
423 423 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
424 424 try:
425 425 f.write(self._branch + '\n')
426 426 f.close()
427 427
428 428 # make sure filecache has the correct stat info for _branch after
429 429 # replacing the underlying file
430 430 ce = self._filecache['_branch']
431 431 if ce:
432 432 ce.refresh()
433 433 except: # re-raises
434 434 f.discard()
435 435 raise
436 436
437 437 def _opendirstatefile(self):
438 438 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
439 439 if self._pendingmode is not None and self._pendingmode != mode:
440 440 fp.close()
441 441 raise error.Abort(_('working directory state may be '
442 442 'changed parallelly'))
443 443 self._pendingmode = mode
444 444 return fp
445 445
446 446 def _read(self):
447 self._map = {}
447 self._map = dirstatemap()
448
448 449 self._copymap = {}
449 450 # ignore HG_PENDING because identity is used only for writing
450 451 self._identity = util.filestat.frompath(
451 452 self._opener.join(self._filename))
452 453 try:
453 454 fp = self._opendirstatefile()
454 455 try:
455 456 st = fp.read()
456 457 finally:
457 458 fp.close()
458 459 except IOError as err:
459 460 if err.errno != errno.ENOENT:
460 461 raise
461 462 return
462 463 if not st:
463 464 return
464 465
465 466 if util.safehasattr(parsers, 'dict_new_presized'):
466 467 # Make an estimate of the number of files in the dirstate based on
467 468 # its size. From a linear regression on a set of real-world repos,
468 469 # all over 10,000 files, the size of a dirstate entry is 85
469 470 # bytes. The cost of resizing is significantly higher than the cost
470 471 # of filling in a larger presized dict, so subtract 20% from the
471 472 # size.
472 473 #
473 474 # This heuristic is imperfect in many ways, so in a future dirstate
474 475 # format update it makes sense to just record the number of entries
475 476 # on write.
476 self._map = parsers.dict_new_presized(len(st) / 71)
477 self._map._map = parsers.dict_new_presized(len(st) / 71)
477 478
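# Worked example of the heuristic above: a 1,048,576-byte dirstate file is
# presized for 1,048,576 / 71 ~= 14,768 slots, comfortably above the
# ~12,300 entries it would actually hold at ~85 bytes each.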
478 479 # Python's garbage collector triggers a GC each time a certain number
479 480 # of container objects (the number being defined by
480 481 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
481 482 # for each file in the dirstate. The C version then immediately marks
482 483 # them as not to be tracked by the collector. However, this has no
483 484 # effect on when GCs are triggered, only on what objects the GC looks
484 485 # into. This means that O(number of files) GCs are unavoidable.
485 486 # Depending on when in the process's lifetime the dirstate is parsed,
486 487 # this can get very expensive. As a workaround, disable GC while
487 488 # parsing the dirstate.
488 489 #
489 490 # (we cannot decorate the function directly since it is in a C module)
490 491 parse_dirstate = util.nogc(parsers.parse_dirstate)
491 p = parse_dirstate(self._map, self._copymap, st)
492 p = parse_dirstate(self._map._map, self._copymap, st)
492 493 if not self._dirtypl:
493 494 self._pl = p
494 495
495 496 def invalidate(self):
496 497 '''Causes the next access to reread the dirstate.
497 498
498 499 This is different from localrepo.invalidatedirstate() because it always
499 500 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
500 501 check whether the dirstate has changed before rereading it.'''
501 502
502 503 for a in ("_map", "_copymap", "_identity",
503 504 "_filefoldmap", "_dirfoldmap", "_branch",
504 505 "_pl", "_dirs", "_ignore", "_nonnormalset",
505 506 "_otherparentset"):
506 507 if a in self.__dict__:
507 508 delattr(self, a)
508 509 self._lastnormaltime = 0
509 510 self._dirty = False
510 511 self._updatedfiles.clear()
511 512 self._parentwriters = 0
512 513 self._origpl = None
513 514
514 515 def copy(self, source, dest):
515 516 """Mark dest as a copy of source. Unmark dest if source is None."""
516 517 if source == dest:
517 518 return
518 519 self._dirty = True
519 520 if source is not None:
520 521 self._copymap[dest] = source
521 522 self._updatedfiles.add(source)
522 523 self._updatedfiles.add(dest)
523 524 elif self._copymap.pop(dest, None):
524 525 self._updatedfiles.add(dest)
525 526
526 527 def copied(self, file):
527 528 return self._copymap.get(file, None)
528 529
529 530 def copies(self):
530 531 return self._copymap
531 532
532 533 def _droppath(self, f):
533 534 if self[f] not in "?r" and "_dirs" in self.__dict__:
534 535 self._dirs.delpath(f)
535 536
536 537 if "_filefoldmap" in self.__dict__:
537 538 normed = util.normcase(f)
538 539 if normed in self._filefoldmap:
539 540 del self._filefoldmap[normed]
540 541
541 542 self._updatedfiles.add(f)
542 543
543 544 def _addpath(self, f, state, mode, size, mtime):
544 545 oldstate = self[f]
545 546 if state == 'a' or oldstate == 'r':
546 547 scmutil.checkfilename(f)
547 548 if f in self._dirs:
548 549 raise error.Abort(_('directory %r already in dirstate') % f)
549 550 # shadows
550 551 for d in util.finddirs(f):
551 552 if d in self._dirs:
552 553 break
553 554 entry = self._map.get(d)
554 555 if entry is not None and entry[0] != 'r':
555 556 raise error.Abort(
556 557 _('file %r in dirstate clashes with %r') % (d, f))
557 558 if oldstate in "?r" and "_dirs" in self.__dict__:
558 559 self._dirs.addpath(f)
559 560 self._dirty = True
560 561 self._updatedfiles.add(f)
561 562 self._map[f] = dirstatetuple(state, mode, size, mtime)
562 563 if state != 'n' or mtime == -1:
563 564 self._nonnormalset.add(f)
564 565 if size == -2:
565 566 self._otherparentset.add(f)
566 567
567 568 def normal(self, f):
568 569 '''Mark a file normal and clean.'''
569 570 s = os.lstat(self._join(f))
570 571 mtime = s.st_mtime
571 572 self._addpath(f, 'n', s.st_mode,
572 573 s.st_size & _rangemask, mtime & _rangemask)
573 574 self._copymap.pop(f, None)
574 575 if f in self._nonnormalset:
575 576 self._nonnormalset.remove(f)
576 577 if mtime > self._lastnormaltime:
577 578 # Remember the most recent modification timeslot for status(),
578 579 # to make sure we won't miss future size-preserving file content
579 580 # modifications that happen within the same timeslot.
580 581 self._lastnormaltime = mtime
581 582
582 583 def normallookup(self, f):
583 584 '''Mark a file normal, but possibly dirty.'''
584 585 if self._pl[1] != nullid:
585 586 # if there is a merge going on and the file was either
586 587 # in state 'm' (-1) or coming from other parent (-2) before
587 588 # being removed, restore that state.
588 589 entry = self._map.get(f)
589 590 if entry is not None:
590 591 if entry[0] == 'r' and entry[2] in (-1, -2):
591 592 source = self._copymap.get(f)
592 593 if entry[2] == -1:
593 594 self.merge(f)
594 595 elif entry[2] == -2:
595 596 self.otherparent(f)
596 597 if source:
597 598 self.copy(source, f)
598 599 return
599 600 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
600 601 return
601 602 self._addpath(f, 'n', 0, -1, -1)
602 603 self._copymap.pop(f, None)
603 604 if f in self._nonnormalset:
604 605 self._nonnormalset.remove(f)
605 606
606 607 def otherparent(self, f):
607 608 '''Mark as coming from the other parent, always dirty.'''
608 609 if self._pl[1] == nullid:
609 610 raise error.Abort(_("setting %r to other parent "
610 611 "only allowed in merges") % f)
611 612 if f in self and self[f] == 'n':
612 613 # merge-like
613 614 self._addpath(f, 'm', 0, -2, -1)
614 615 else:
615 616 # add-like
616 617 self._addpath(f, 'n', 0, -2, -1)
617 618 self._copymap.pop(f, None)
618 619
619 620 def add(self, f):
620 621 '''Mark a file added.'''
621 622 self._addpath(f, 'a', 0, -1, -1)
622 623 self._copymap.pop(f, None)
623 624
624 625 def remove(self, f):
625 626 '''Mark a file removed.'''
626 627 self._dirty = True
627 628 self._droppath(f)
628 629 size = 0
629 630 if self._pl[1] != nullid:
630 631 entry = self._map.get(f)
631 632 if entry is not None:
632 633 # backup the previous state
633 634 if entry[0] == 'm': # merge
634 635 size = -1
635 636 elif entry[0] == 'n' and entry[2] == -2: # other parent
636 637 size = -2
637 638 self._otherparentset.add(f)
638 639 self._map[f] = dirstatetuple('r', 0, size, 0)
639 640 self._nonnormalset.add(f)
640 641 if size == 0:
641 642 self._copymap.pop(f, None)
642 643
643 644 def merge(self, f):
644 645 '''Mark a file merged.'''
645 646 if self._pl[1] == nullid:
646 647 return self.normallookup(f)
647 648 return self.otherparent(f)
648 649
649 650 def drop(self, f):
650 651 '''Drop a file from the dirstate'''
651 652 if f in self._map:
652 653 self._dirty = True
653 654 self._droppath(f)
654 655 del self._map[f]
655 656 if f in self._nonnormalset:
656 657 self._nonnormalset.remove(f)
657 658 self._copymap.pop(f, None)
658 659
659 660 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
660 661 if exists is None:
661 662 exists = os.path.lexists(os.path.join(self._root, path))
662 663 if not exists:
663 664 # Maybe a path component exists
664 665 if not ignoremissing and '/' in path:
665 666 d, f = path.rsplit('/', 1)
666 667 d = self._normalize(d, False, ignoremissing, None)
667 668 folded = d + "/" + f
668 669 else:
669 670 # No path components, preserve original case
670 671 folded = path
671 672 else:
672 673 # recursively normalize leading directory components
673 674 # against dirstate
674 675 if '/' in normed:
675 676 d, f = normed.rsplit('/', 1)
676 677 d = self._normalize(d, False, ignoremissing, True)
677 678 r = self._root + "/" + d
678 679 folded = d + "/" + util.fspath(f, r)
679 680 else:
680 681 folded = util.fspath(normed, self._root)
681 682 storemap[normed] = folded
682 683
683 684 return folded
684 685
685 686 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
686 687 normed = util.normcase(path)
687 688 folded = self._filefoldmap.get(normed, None)
688 689 if folded is None:
689 690 if isknown:
690 691 folded = path
691 692 else:
692 693 folded = self._discoverpath(path, normed, ignoremissing, exists,
693 694 self._filefoldmap)
694 695 return folded
695 696
696 697 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
697 698 normed = util.normcase(path)
698 699 folded = self._filefoldmap.get(normed, None)
699 700 if folded is None:
700 701 folded = self._dirfoldmap.get(normed, None)
701 702 if folded is None:
702 703 if isknown:
703 704 folded = path
704 705 else:
705 706 # store discovered result in dirfoldmap so that future
706 707 # normalizefile calls don't start matching directories
707 708 folded = self._discoverpath(path, normed, ignoremissing, exists,
708 709 self._dirfoldmap)
709 710 return folded
710 711
711 712 def normalize(self, path, isknown=False, ignoremissing=False):
712 713 '''
713 714 normalize the case of a pathname when on a casefolding filesystem
714 715
715 716 isknown specifies whether the filename came from walking the
716 717 disk, to avoid extra filesystem access.
717 718
718 719 If ignoremissing is True, missing paths are returned
719 720 unchanged. Otherwise, we try harder to normalize possibly
720 721 existing path components.
721 722
722 723 The normalized case is determined based on the following precedence:
723 724
724 725 - version of name already stored in the dirstate
725 726 - version of name stored on disk
726 727 - version provided via command arguments
727 728 '''
728 729
729 730 if self._checkcase:
730 731 return self._normalize(path, isknown, ignoremissing)
731 732 return path
732 733
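# Illustrative sketch (hypothetical helper): on a case-folding filesystem
# the spelling already recorded in the dirstate or on disk wins over what
# the user typed, so e.g. 'readme.TXT' can come back as 'README.txt'.
def _example_normalize(ds, userpath):
    return ds.normalize(userpath, isknown=False, ignoremissing=True)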
733 734 def clear(self):
734 self._map = {}
735 self._map = dirstatemap()
735 736 self._nonnormalset = set()
736 737 self._otherparentset = set()
737 738 if "_dirs" in self.__dict__:
738 739 delattr(self, "_dirs")
739 740 self._copymap = {}
740 741 self._pl = [nullid, nullid]
741 742 self._lastnormaltime = 0
742 743 self._updatedfiles.clear()
743 744 self._dirty = True
744 745
745 746 def rebuild(self, parent, allfiles, changedfiles=None):
746 747 if changedfiles is None:
747 748 # Rebuild entire dirstate
748 749 changedfiles = allfiles
749 750 lastnormaltime = self._lastnormaltime
750 751 self.clear()
751 752 self._lastnormaltime = lastnormaltime
752 753
753 754 if self._origpl is None:
754 755 self._origpl = self._pl
755 756 self._pl = (parent, nullid)
756 757 for f in changedfiles:
757 758 if f in allfiles:
758 759 self.normallookup(f)
759 760 else:
760 761 self.drop(f)
761 762
762 763 self._dirty = True
763 764
764 765 def identity(self):
765 766 '''Return identity of dirstate itself to detect changing in storage
766 767
767 768 If identity of previous dirstate is equal to this, writing
768 769 changes based on the former dirstate out can keep consistency.
769 770 '''
770 771 return self._identity
771 772
772 773 def write(self, tr):
773 774 if not self._dirty:
774 775 return
775 776
776 777 filename = self._filename
777 778 if tr:
778 779 # 'dirstate.write()' is not only for writing in-memory
779 780 # changes out, but also for dropping ambiguous timestamp.
780 781 # Delayed writing would re-raise the "ambiguous timestamp issue".
781 782 # See also the wiki page below for detail:
782 783 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
783 784
784 785 # emulate dropping timestamp in 'parsers.pack_dirstate'
785 786 now = _getfsnow(self._opener)
786 787 dmap = self._map
787 788 for f in self._updatedfiles:
788 789 e = dmap.get(f)
789 790 if e is not None and e[0] == 'n' and e[3] == now:
790 791 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
791 792 self._nonnormalset.add(f)
792 793
793 794 # emulate that all 'dirstate.normal' results are written out
794 795 self._lastnormaltime = 0
795 796 self._updatedfiles.clear()
796 797
797 798 # delay writing in-memory changes out
798 799 tr.addfilegenerator('dirstate', (self._filename,),
799 800 self._writedirstate, location='plain')
800 801 return
801 802
802 803 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
803 804 self._writedirstate(st)
804 805
805 806 def addparentchangecallback(self, category, callback):
806 807 """add a callback to be called when the wd parents are changed
807 808
808 809 Callback will be called with the following arguments:
809 810 dirstate, (oldp1, oldp2), (newp1, newp2)
810 811
811 812 Category is a unique identifier to allow overwriting an old callback
812 813 with a newer callback.
813 814 """
814 815 self._plchangecallbacks[category] = callback
815 816
816 817 def _writedirstate(self, st):
817 818 # notify callbacks about parents change
818 819 if self._origpl is not None and self._origpl != self._pl:
819 820 for c, callback in sorted(self._plchangecallbacks.iteritems()):
820 821 callback(self, self._origpl, self._pl)
821 822 self._origpl = None
822 823 # use the modification time of the newly created temporary file as the
823 824 # filesystem's notion of 'now'
824 825 now = util.fstat(st).st_mtime & _rangemask
825 826
826 827 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
827 828 # the timestamp of each entry in dirstate, because of 'now > mtime'
828 829 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
829 830 if delaywrite > 0:
830 831 # do we have any files to delay for?
831 832 for f, e in self._map.iteritems():
832 833 if e[0] == 'n' and e[3] == now:
833 834 import time # to avoid useless import
834 835 # rather than sleep n seconds, sleep until the next
835 836 # multiple of n seconds
836 837 clock = time.time()
837 838 start = int(clock) - (int(clock) % delaywrite)
838 839 end = start + delaywrite
839 840 time.sleep(end - clock)
840 841 now = end # trust our estimate that the end is near now
841 842 break
842 843
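# Worked example of the delay above: with delaywrite=2 and clock=13.4s,
# start = 13 - (13 % 2) = 12 and end = 14, so the write sleeps 0.6s and
# then treats 14 as "now".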
843 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
844 st.write(parsers.pack_dirstate(self._map._map, self._copymap, self._pl,
845 now))
844 846 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
845 847 st.close()
846 848 self._lastnormaltime = 0
847 849 self._dirty = self._dirtypl = False
848 850
849 851 def _dirignore(self, f):
850 852 if f == '.':
851 853 return False
852 854 if self._ignore(f):
853 855 return True
854 856 for p in util.finddirs(f):
855 857 if self._ignore(p):
856 858 return True
857 859 return False
858 860
859 861 def _ignorefiles(self):
860 862 files = []
861 863 if os.path.exists(self._join('.hgignore')):
862 864 files.append(self._join('.hgignore'))
863 865 for name, path in self._ui.configitems("ui"):
864 866 if name == 'ignore' or name.startswith('ignore.'):
865 867 # we need to use os.path.join here rather than self._join
866 868 # because path is arbitrary and user-specified
867 869 files.append(os.path.join(self._rootdir, util.expandpath(path)))
868 870 return files
869 871
870 872 def _ignorefileandline(self, f):
871 873 files = collections.deque(self._ignorefiles())
872 874 visited = set()
873 875 while files:
874 876 i = files.popleft()
875 877 patterns = matchmod.readpatternfile(i, self._ui.warn,
876 878 sourceinfo=True)
877 879 for pattern, lineno, line in patterns:
878 880 kind, p = matchmod._patsplit(pattern, 'glob')
879 881 if kind == "subinclude":
880 882 if p not in visited:
881 883 files.append(p)
882 884 continue
883 885 m = matchmod.match(self._root, '', [], [pattern],
884 886 warn=self._ui.warn)
885 887 if m(f):
886 888 return (i, lineno, line)
887 889 visited.add(i)
888 890 return (None, -1, "")
889 891
890 892 def _walkexplicit(self, match, subrepos):
891 893 '''Get stat data about the files explicitly specified by match.
892 894
893 895 Return a triple (results, dirsfound, dirsnotfound).
894 896 - results is a mapping from filename to stat result. It also contains
895 897 listings mapping subrepos and .hg to None.
896 898 - dirsfound is a list of files found to be directories.
897 899 - dirsnotfound is a list of files that the dirstate thinks are
898 900 directories and that were not found.'''
899 901
900 902 def badtype(mode):
901 903 kind = _('unknown')
902 904 if stat.S_ISCHR(mode):
903 905 kind = _('character device')
904 906 elif stat.S_ISBLK(mode):
905 907 kind = _('block device')
906 908 elif stat.S_ISFIFO(mode):
907 909 kind = _('fifo')
908 910 elif stat.S_ISSOCK(mode):
909 911 kind = _('socket')
910 912 elif stat.S_ISDIR(mode):
911 913 kind = _('directory')
912 914 return _('unsupported file type (type is %s)') % kind
913 915
914 916 matchedir = match.explicitdir
915 917 badfn = match.bad
916 918 dmap = self._map
917 919 lstat = os.lstat
918 920 getkind = stat.S_IFMT
919 921 dirkind = stat.S_IFDIR
920 922 regkind = stat.S_IFREG
921 923 lnkkind = stat.S_IFLNK
922 924 join = self._join
923 925 dirsfound = []
924 926 foundadd = dirsfound.append
925 927 dirsnotfound = []
926 928 notfoundadd = dirsnotfound.append
927 929
928 930 if not match.isexact() and self._checkcase:
929 931 normalize = self._normalize
930 932 else:
931 933 normalize = None
932 934
933 935 files = sorted(match.files())
934 936 subrepos.sort()
935 937 i, j = 0, 0
936 938 while i < len(files) and j < len(subrepos):
937 939 subpath = subrepos[j] + "/"
938 940 if files[i] < subpath:
939 941 i += 1
940 942 continue
941 943 while i < len(files) and files[i].startswith(subpath):
942 944 del files[i]
943 945 j += 1
944 946
945 947 if not files or '.' in files:
946 948 files = ['.']
947 949 results = dict.fromkeys(subrepos)
948 950 results['.hg'] = None
949 951
950 952 alldirs = None
951 953 for ff in files:
952 954 # constructing the foldmap is expensive, so don't do it for the
953 955 # common case where files is ['.']
954 956 if normalize and ff != '.':
955 957 nf = normalize(ff, False, True)
956 958 else:
957 959 nf = ff
958 960 if nf in results:
959 961 continue
960 962
961 963 try:
962 964 st = lstat(join(nf))
963 965 kind = getkind(st.st_mode)
964 966 if kind == dirkind:
965 967 if nf in dmap:
966 968 # file replaced by dir on disk but still in dirstate
967 969 results[nf] = None
968 970 if matchedir:
969 971 matchedir(nf)
970 972 foundadd((nf, ff))
971 973 elif kind == regkind or kind == lnkkind:
972 974 results[nf] = st
973 975 else:
974 976 badfn(ff, badtype(kind))
975 977 if nf in dmap:
976 978 results[nf] = None
977 979 except OSError as inst: # nf not found on disk - it is dirstate only
978 980 if nf in dmap: # does it exactly match a missing file?
979 981 results[nf] = None
980 982 else: # does it match a missing directory?
981 983 if alldirs is None:
982 alldirs = util.dirs(dmap)
984 alldirs = util.dirs(dmap._map)
983 985 if nf in alldirs:
984 986 if matchedir:
985 987 matchedir(nf)
986 988 notfoundadd(nf)
987 989 else:
988 990 badfn(ff, encoding.strtolocal(inst.strerror))
989 991
990 992 # Case insensitive filesystems cannot rely on lstat() failing to detect
991 993 # a case-only rename. Prune the stat object for any file that does not
992 994 # match the case in the filesystem, if there are multiple files that
993 995 # normalize to the same path.
994 996 if match.isexact() and self._checkcase:
995 997 normed = {}
996 998
997 999 for f, st in results.iteritems():
998 1000 if st is None:
999 1001 continue
1000 1002
1001 1003 nc = util.normcase(f)
1002 1004 paths = normed.get(nc)
1003 1005
1004 1006 if paths is None:
1005 1007 paths = set()
1006 1008 normed[nc] = paths
1007 1009
1008 1010 paths.add(f)
1009 1011
1010 1012 for norm, paths in normed.iteritems():
1011 1013 if len(paths) > 1:
1012 1014 for path in paths:
1013 1015 folded = self._discoverpath(path, norm, True, None,
1014 1016 self._dirfoldmap)
1015 1017 if path != folded:
1016 1018 results[path] = None
1017 1019
1018 1020 return results, dirsfound, dirsnotfound
1019 1021
1020 1022 def walk(self, match, subrepos, unknown, ignored, full=True):
1021 1023 '''
1022 1024 Walk recursively through the directory tree, finding all files
1023 1025 matched by match.
1024 1026
1025 1027 If full is False, maybe skip some known-clean files.
1026 1028
1027 1029 Return a dict mapping filename to stat-like object (either
1028 1030 mercurial.osutil.stat instance or return value of os.stat()).
1029 1031
1030 1032 '''
1031 1033 # full is a flag that extensions that hook into walk can use -- this
1032 1034 # implementation doesn't use it at all. This satisfies the contract
1033 1035 # because we only guarantee a "maybe".
1034 1036
1035 1037 if ignored:
1036 1038 ignore = util.never
1037 1039 dirignore = util.never
1038 1040 elif unknown:
1039 1041 ignore = self._ignore
1040 1042 dirignore = self._dirignore
1041 1043 else:
1042 1044 # if not unknown and not ignored, drop dir recursion and step 2
1043 1045 ignore = util.always
1044 1046 dirignore = util.always
1045 1047
1046 1048 matchfn = match.matchfn
1047 1049 matchalways = match.always()
1048 1050 matchtdir = match.traversedir
1049 1051 dmap = self._map
1050 1052 listdir = util.listdir
1051 1053 lstat = os.lstat
1052 1054 dirkind = stat.S_IFDIR
1053 1055 regkind = stat.S_IFREG
1054 1056 lnkkind = stat.S_IFLNK
1055 1057 join = self._join
1056 1058
1057 1059 exact = skipstep3 = False
1058 1060 if match.isexact(): # match.exact
1059 1061 exact = True
1060 1062 dirignore = util.always # skip step 2
1061 1063 elif match.prefix(): # match.match, no patterns
1062 1064 skipstep3 = True
1063 1065
1064 1066 if not exact and self._checkcase:
1065 1067 normalize = self._normalize
1066 1068 normalizefile = self._normalizefile
1067 1069 skipstep3 = False
1068 1070 else:
1069 1071 normalize = self._normalize
1070 1072 normalizefile = None
1071 1073
1072 1074 # step 1: find all explicit files
1073 1075 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1074 1076
1075 1077 skipstep3 = skipstep3 and not (work or dirsnotfound)
1076 1078 work = [d for d in work if not dirignore(d[0])]
1077 1079
1078 1080 # step 2: visit subdirectories
1079 1081 def traverse(work, alreadynormed):
1080 1082 wadd = work.append
1081 1083 while work:
1082 1084 nd = work.pop()
1083 1085 if not match.visitdir(nd):
1084 1086 continue
1085 1087 skip = None
1086 1088 if nd == '.':
1087 1089 nd = ''
1088 1090 else:
1089 1091 skip = '.hg'
1090 1092 try:
1091 1093 entries = listdir(join(nd), stat=True, skip=skip)
1092 1094 except OSError as inst:
1093 1095 if inst.errno in (errno.EACCES, errno.ENOENT):
1094 1096 match.bad(self.pathto(nd),
1095 1097 encoding.strtolocal(inst.strerror))
1096 1098 continue
1097 1099 raise
1098 1100 for f, kind, st in entries:
1099 1101 if normalizefile:
1100 1102 # even though f might be a directory, we're only
1101 1103 # interested in comparing it to files currently in the
1102 1104 # dmap -- therefore normalizefile is enough
1103 1105 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1104 1106 True)
1105 1107 else:
1106 1108 nf = nd and (nd + "/" + f) or f
1107 1109 if nf not in results:
1108 1110 if kind == dirkind:
1109 1111 if not ignore(nf):
1110 1112 if matchtdir:
1111 1113 matchtdir(nf)
1112 1114 wadd(nf)
1113 1115 if nf in dmap and (matchalways or matchfn(nf)):
1114 1116 results[nf] = None
1115 1117 elif kind == regkind or kind == lnkkind:
1116 1118 if nf in dmap:
1117 1119 if matchalways or matchfn(nf):
1118 1120 results[nf] = st
1119 1121 elif ((matchalways or matchfn(nf))
1120 1122 and not ignore(nf)):
1121 1123 # unknown file -- normalize if necessary
1122 1124 if not alreadynormed:
1123 1125 nf = normalize(nf, False, True)
1124 1126 results[nf] = st
1125 1127 elif nf in dmap and (matchalways or matchfn(nf)):
1126 1128 results[nf] = None
1127 1129
1128 1130 for nd, d in work:
1129 1131 # alreadynormed means that processwork doesn't have to do any
1130 1132 # expensive directory normalization
1131 1133 alreadynormed = not normalize or nd == d
1132 1134 traverse([d], alreadynormed)
1133 1135
1134 1136 for s in subrepos:
1135 1137 del results[s]
1136 1138 del results['.hg']
1137 1139
1138 1140 # step 3: visit remaining files from dmap
1139 1141 if not skipstep3 and not exact:
1140 1142 # If a dmap file is not in results yet, it was either
1141 1143 # a) not matching matchfn b) ignored, c) missing, or d) under a
1142 1144 # symlink directory.
1143 1145 if not results and matchalways:
1144 1146 visit = [f for f in dmap]
1145 1147 else:
1146 1148 visit = [f for f in dmap if f not in results and matchfn(f)]
1147 1149 visit.sort()
1148 1150
1149 1151 if unknown:
1150 1152 # unknown == True means we walked all dirs under the roots
1151 1153 # that weren't ignored, and everything that matched was stat'ed
1152 1154 # and is already in results.
1153 1155 # The rest must thus be ignored or under a symlink.
1154 1156 audit_path = pathutil.pathauditor(self._root, cached=True)
1155 1157
1156 1158 for nf in iter(visit):
1157 1159 # If a stat for the same file was already added with a
1158 1160 # different case, don't add one for this, since that would
1159 1161 # make it appear as if the file exists under both names
1160 1162 # on disk.
1161 1163 if (normalizefile and
1162 1164 normalizefile(nf, True, True) in results):
1163 1165 results[nf] = None
1164 1166 # Report ignored items in the dmap as long as they are not
1165 1167 # under a symlink directory.
1166 1168 elif audit_path.check(nf):
1167 1169 try:
1168 1170 results[nf] = lstat(join(nf))
1169 1171 # file was just ignored, no links, and exists
1170 1172 except OSError:
1171 1173 # file doesn't exist
1172 1174 results[nf] = None
1173 1175 else:
1174 1176 # It's either missing or under a symlink directory
1175 1177 # which we in this case report as missing
1176 1178 results[nf] = None
1177 1179 else:
1178 1180 # We may not have walked the full directory tree above,
1179 1181 # so stat and check everything we missed.
1180 1182 iv = iter(visit)
1181 1183 for st in util.statfiles([join(i) for i in visit]):
1182 1184 results[next(iv)] = st
1183 1185 return results
1184 1186
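# Illustrative sketch (hypothetical helper): walk() maps each matched path
# to a stat-like object, or to None when no usable stat was obtained
# (typically dirstate entries missing on disk); collecting the None values
# gives the missing files.
def _example_missing(ds, match, subrepos):
    results = ds.walk(match, subrepos, unknown=True, ignored=False)
    return sorted(f for f, st in results.iteritems() if st is None)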
1185 1187 def status(self, match, subrepos, ignored, clean, unknown):
1186 1188 '''Determine the status of the working copy relative to the
1187 1189 dirstate and return a pair of (unsure, status), where status is of type
1188 1190 scmutil.status and:
1189 1191
1190 1192 unsure:
1191 1193 files that might have been modified since the dirstate was
1192 1194 written, but need to be read to be sure (size is the same
1193 1195 but mtime differs)
1194 1196 status.modified:
1195 1197 files that have definitely been modified since the dirstate
1196 1198 was written (different size or mode)
1197 1199 status.clean:
1198 1200 files that have definitely not been modified since the
1199 1201 dirstate was written
1200 1202 '''
1201 1203 listignored, listclean, listunknown = ignored, clean, unknown
1202 1204 lookup, modified, added, unknown, ignored = [], [], [], [], []
1203 1205 removed, deleted, clean = [], [], []
1204 1206
1205 1207 dmap = self._map
1206 1208 ladd = lookup.append # aka "unsure"
1207 1209 madd = modified.append
1208 1210 aadd = added.append
1209 1211 uadd = unknown.append
1210 1212 iadd = ignored.append
1211 1213 radd = removed.append
1212 1214 dadd = deleted.append
1213 1215 cadd = clean.append
1214 1216 mexact = match.exact
1215 1217 dirignore = self._dirignore
1216 1218 checkexec = self._checkexec
1217 1219 copymap = self._copymap
1218 1220 lastnormaltime = self._lastnormaltime
1219 1221
1220 1222 # We need to do full walks when either
1221 1223 # - we're listing all clean files, or
1222 1224 # - match.traversedir does something, because match.traversedir should
1223 1225 # be called for every dir in the working dir
1224 1226 full = listclean or match.traversedir is not None
1225 1227 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1226 1228 full=full).iteritems():
1227 1229 if fn not in dmap:
1228 1230 if (listignored or mexact(fn)) and dirignore(fn):
1229 1231 if listignored:
1230 1232 iadd(fn)
1231 1233 else:
1232 1234 uadd(fn)
1233 1235 continue
1234 1236
1235 1237 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1236 1238 # written like that for performance reasons. dmap[fn] is not a
1237 1239 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1238 1240 # opcode has fast paths when the value to be unpacked is a tuple or
1239 1241 # a list, but falls back to creating a full-fledged iterator in
1240 1242 # general. That is much slower than simply accessing and storing the
1241 1243 # tuple members one by one.
1242 1244 t = dmap[fn]
1243 1245 state = t[0]
1244 1246 mode = t[1]
1245 1247 size = t[2]
1246 1248 time = t[3]
1247 1249
1248 1250 if not st and state in "nma":
1249 1251 dadd(fn)
1250 1252 elif state == 'n':
1251 1253 if (size >= 0 and
1252 1254 ((size != st.st_size and size != st.st_size & _rangemask)
1253 1255 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1254 1256 or size == -2 # other parent
1255 1257 or fn in copymap):
1256 1258 madd(fn)
1257 1259 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1258 1260 ladd(fn)
1259 1261 elif st.st_mtime == lastnormaltime:
1260 1262 # fn may have just been marked as normal and it may have
1261 1263 # changed in the same second without changing its size.
1262 1264 # This can happen if we quickly do multiple commits.
1263 1265 # Force lookup, so we don't miss such a racy file change.
1264 1266 ladd(fn)
1265 1267 elif listclean:
1266 1268 cadd(fn)
1267 1269 elif state == 'm':
1268 1270 madd(fn)
1269 1271 elif state == 'a':
1270 1272 aadd(fn)
1271 1273 elif state == 'r':
1272 1274 radd(fn)
1273 1275
1274 1276 return (lookup, scmutil.status(modified, added, removed, deleted,
1275 1277 unknown, ignored, clean))
1276 1278
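# Illustrative sketch (hypothetical helper): the common "is the working
# directory dirty?" check built on top of status(); 'unsure' entries still
# need their contents compared before they can be called clean.
def _example_isdirty(ds, match, subrepos):
    unsure, st = ds.status(match, subrepos,
                           ignored=False, clean=False, unknown=True)
    return bool(unsure or st.modified or st.added or st.removed or st.deleted)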
1277 1279 def matches(self, match):
1278 1280 '''
1279 1281 return files in the dirstate (in whatever state) filtered by match
1280 1282 '''
1281 1283 dmap = self._map
1282 1284 if match.always():
1283 1285 return dmap.keys()
1284 1286 files = match.files()
1285 1287 if match.isexact():
1286 1288 # fast path -- filter the other way around, since typically files is
1287 1289 # much smaller than dmap
1288 1290 return [f for f in files if f in dmap]
1289 1291 if match.prefix() and all(fn in dmap for fn in files):
1290 1292 # fast path -- all the values are known to be files, so just return
1291 1293 # that
1292 1294 return list(files)
1293 1295 return [f for f in dmap if match(f)]
1294 1296
1295 1297 def _actualfilename(self, tr):
1296 1298 if tr:
1297 1299 return self._pendingfilename
1298 1300 else:
1299 1301 return self._filename
1300 1302
1301 1303 def savebackup(self, tr, backupname):
1302 1304 '''Save current dirstate into backup file'''
1303 1305 filename = self._actualfilename(tr)
1304 1306 assert backupname != filename
1305 1307
1306 1308 # use '_writedirstate' instead of 'write' to write changes certainly,
1307 1309 # because the latter omits writing out if transaction is running.
1308 1310 # output file will be used to create backup of dirstate at this point.
1309 1311 if self._dirty or not self._opener.exists(filename):
1310 1312 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1311 1313 checkambig=True))
1312 1314
1313 1315 if tr:
1314 1316 # ensure that subsequent tr.writepending returns True for
1315 1317 # changes written out above, even if dirstate is never
1316 1318 # changed after this
1317 1319 tr.addfilegenerator('dirstate', (self._filename,),
1318 1320 self._writedirstate, location='plain')
1319 1321
1320 1322 # ensure that pending file written above is unlinked at
1321 1323 # failure, even if tr.writepending isn't invoked until the
1322 1324 # end of this transaction
1323 1325 tr.registertmp(filename, location='plain')
1324 1326
1325 1327 self._opener.tryunlink(backupname)
1326 1328 # hardlink backup is okay because _writedirstate is always called
1327 1329 # with an "atomictemp=True" file.
1328 1330 util.copyfile(self._opener.join(filename),
1329 1331 self._opener.join(backupname), hardlink=True)
1330 1332
1331 1333 def restorebackup(self, tr, backupname):
1332 1334 '''Restore dirstate by backup file'''
1333 1335 # this "invalidate()" prevents "wlock.release()" from writing
1334 1336 # changes of dirstate out after restoring from backup file
1335 1337 self.invalidate()
1336 1338 filename = self._actualfilename(tr)
1337 1339 self._opener.rename(backupname, filename, checkambig=True)
1338 1340
1339 1341 def clearbackup(self, tr, backupname):
1340 1342 '''Clear backup file'''
1341 1343 self._opener.unlink(backupname)
1344
1345 class dirstatemap(object):
1346 def __init__(self):
1347 self._map = {}
1348
1349 def iteritems(self):
1350 return self._map.iteritems()
1351
1352 def __iter__(self):
1353 return iter(self._map)
1354
1355 def get(self, key, default=None):
1356 return self._map.get(key, default)
1357
1358 def __contains__(self, key):
1359 return key in self._map
1360
1361 def __setitem__(self, key, value):
1362 self._map[key] = value
1363
1364 def __getitem__(self, key):
1365 return self._map[key]
1366
1367 def __delitem__(self, key):
1368 del self._map[key]
1369
1370 def keys(self):
1371 return self._map.keys()
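# Illustrative sketch: the new dirstatemap mirrors the plain-dict interface
# the rest of this file relies on, while the C-level consumers
# (parse_dirstate, pack_dirstate, util.dirs, ...) keep receiving the raw
# dict through the private _map attribute.
def _example_dirstatemap():
    dmap = dirstatemap()
    dmap['a.txt'] = dirstatetuple('n', 0o644, 12, 0)
    assert 'a.txt' in dmap
    assert dmap.get('b.txt') is None
    return dmap._map    # the underlying dict handed to the C parsers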