dirstate: centralize _cwd handling into _cwd method...
FUJIWARA Katsunori
r33212:b7f6885c default
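To make the intent of the change easier to follow before reading the full hunk, here is a hedged, self-contained sketch of the pattern this changeset adopts (simplified stand-in names, not the actual Mercurial classes): instead of __init__ eagerly reading the internal ui.forcecwd option and pre-assigning self._cwd only when it is set, the override check now lives inside the lazily computed, cached _cwd itself, so every consumer goes through a single code path. propertycache below mimics util.propertycache; fakeui and cwdowner are illustrative only.

import os

class propertycache(object):
    # minimal stand-in for util.propertycache: the decorated function runs
    # once per instance, and its result is cached on the instance so later
    # attribute lookups skip the function entirely
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value
        return value

class fakeui(object):
    # hypothetical ui object carrying only the one config value of interest
    def __init__(self, forcecwd=None):
        self._forcecwd = forcecwd
    def config(self, section, name):
        if (section, name) == ('ui', 'forcecwd'):
            return self._forcecwd
        return None

class cwdowner(object):
    # the "after" shape of this changeset: the override lives inside _cwd
    def __init__(self, ui):
        self._ui = ui
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config('ui', 'forcecwd')
        if forcecwd:
            return forcecwd
        return os.getcwd()

print(cwdowner(fakeui())._cwd)         # no override: the process cwd
print(cwdowner(fakeui('/repo'))._cwd)  # '/repo', taken from ui.forcecwd

Before this change, the equivalent override was achieved by the removed __init__ lines in the hunk below, which shadowed the cached property only when ui.forcecwd was set; afterwards _cwd is the single place that knows about the option.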
@@ -1,1336 +1,1336
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 def nonnormalentries(dmap):
58 58 '''Compute the nonnormal dirstate entries from the dmap'''
59 59 try:
60 60 return parsers.nonnormalotherparententries(dmap)
61 61 except AttributeError:
62 62 nonnorm = set()
63 63 otherparent = set()
64 64 for fname, e in dmap.iteritems():
65 65 if e[0] != 'n' or e[3] == -1:
66 66 nonnorm.add(fname)
67 67 if e[0] == 'n' and e[2] == -2:
68 68 otherparent.add(fname)
69 69 return nonnorm, otherparent
70 70
71 71 class dirstate(object):
72 72
73 73 def __init__(self, opener, ui, root, validate):
74 74 '''Create a new dirstate object.
75 75
76 76 opener is an open()-like callable that can be used to open the
77 77 dirstate file; root is the root of the directory tracked by
78 78 the dirstate.
79 79 '''
80 80 self._opener = opener
81 81 self._validate = validate
82 82 self._root = root
83 83 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
84 84 # UNC path pointing to root share (issue4557)
85 85 self._rootdir = pathutil.normasprefix(root)
86 # internal config: ui.forcecwd
87 forcecwd = ui.config('ui', 'forcecwd')
88 if forcecwd:
89 self._cwd = forcecwd
90 86 self._dirty = False
91 87 self._dirtypl = False
92 88 self._lastnormaltime = 0
93 89 self._ui = ui
94 90 self._filecache = {}
95 91 self._parentwriters = 0
96 92 self._filename = 'dirstate'
97 93 self._pendingfilename = '%s.pending' % self._filename
98 94 self._plchangecallbacks = {}
99 95 self._origpl = None
100 96 self._updatedfiles = set()
101 97
102 98 # for consistent view between _pl() and _read() invocations
103 99 self._pendingmode = None
104 100
105 101 @contextlib.contextmanager
106 102 def parentchange(self):
107 103 '''Context manager for handling dirstate parents.
108 104
109 105 If an exception occurs in the scope of the context manager,
110 106 the incoherent dirstate won't be written when wlock is
111 107 released.
112 108 '''
113 109 self._parentwriters += 1
114 110 yield
115 111 # Typically we want the "undo" step of a context manager in a
116 112 # finally block so it happens even when an exception
117 113 # occurs. In this case, however, we only want to decrement
118 114 # parentwriters if the code in the with statement exits
119 115 # normally, so we don't have a try/finally here on purpose.
120 116 self._parentwriters -= 1
121 117
122 118 def beginparentchange(self):
123 119 '''Marks the beginning of a set of changes that involve changing
124 120 the dirstate parents. If there is an exception during this time,
125 121 the dirstate will not be written when the wlock is released. This
126 122 prevents writing an incoherent dirstate where the parent doesn't
127 123 match the contents.
128 124 '''
129 125 self._ui.deprecwarn('beginparentchange is obsoleted by the '
130 126 'parentchange context manager.', '4.3')
131 127 self._parentwriters += 1
132 128
133 129 def endparentchange(self):
134 130 '''Marks the end of a set of changes that involve changing the
135 131 dirstate parents. Once all parent changes have been marked done,
136 132 the wlock will be free to write the dirstate on release.
137 133 '''
138 134 self._ui.deprecwarn('endparentchange is obsoleted by the '
139 135 'parentchange context manager.', '4.3')
140 136 if self._parentwriters > 0:
141 137 self._parentwriters -= 1
142 138
143 139 def pendingparentchange(self):
144 140 '''Returns true if the dirstate is in the middle of a set of changes
145 141 that modify the dirstate parent.
146 142 '''
147 143 return self._parentwriters > 0
148 144
149 145 @propertycache
150 146 def _map(self):
151 147 '''Return the dirstate contents as a map from filename to
152 148 (state, mode, size, time).'''
153 149 self._read()
154 150 return self._map
155 151
156 152 @propertycache
157 153 def _copymap(self):
158 154 self._read()
159 155 return self._copymap
160 156
161 157 @propertycache
162 158 def _identity(self):
163 159 self._read()
164 160 return self._identity
165 161
166 162 @propertycache
167 163 def _nonnormalset(self):
168 164 nonnorm, otherparents = nonnormalentries(self._map)
169 165 self._otherparentset = otherparents
170 166 return nonnorm
171 167
172 168 @propertycache
173 169 def _otherparentset(self):
174 170 nonnorm, otherparents = nonnormalentries(self._map)
175 171 self._nonnormalset = nonnorm
176 172 return otherparents
177 173
178 174 @propertycache
179 175 def _filefoldmap(self):
180 176 try:
181 177 makefilefoldmap = parsers.make_file_foldmap
182 178 except AttributeError:
183 179 pass
184 180 else:
185 181 return makefilefoldmap(self._map, util.normcasespec,
186 182 util.normcasefallback)
187 183
188 184 f = {}
189 185 normcase = util.normcase
190 186 for name, s in self._map.iteritems():
191 187 if s[0] != 'r':
192 188 f[normcase(name)] = name
193 189 f['.'] = '.' # prevents useless util.fspath() invocation
194 190 return f
195 191
196 192 @propertycache
197 193 def _dirfoldmap(self):
198 194 f = {}
199 195 normcase = util.normcase
200 196 for name in self._dirs:
201 197 f[normcase(name)] = name
202 198 return f
203 199
204 200 @repocache('branch')
205 201 def _branch(self):
206 202 try:
207 203 return self._opener.read("branch").strip() or "default"
208 204 except IOError as inst:
209 205 if inst.errno != errno.ENOENT:
210 206 raise
211 207 return "default"
212 208
213 209 @propertycache
214 210 def _pl(self):
215 211 try:
216 212 fp = self._opendirstatefile()
217 213 st = fp.read(40)
218 214 fp.close()
219 215 l = len(st)
220 216 if l == 40:
221 217 return st[:20], st[20:40]
222 218 elif l > 0 and l < 40:
223 219 raise error.Abort(_('working directory state appears damaged!'))
224 220 except IOError as err:
225 221 if err.errno != errno.ENOENT:
226 222 raise
227 223 return [nullid, nullid]
228 224
229 225 @propertycache
230 226 def _dirs(self):
231 227 return util.dirs(self._map, 'r')
232 228
233 229 def dirs(self):
234 230 return self._dirs
235 231
236 232 @rootcache('.hgignore')
237 233 def _ignore(self):
238 234 files = self._ignorefiles()
239 235 if not files:
240 236 return matchmod.never(self._root, '')
241 237
242 238 pats = ['include:%s' % f for f in files]
243 239 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
244 240
245 241 @propertycache
246 242 def _slash(self):
247 243 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
248 244
249 245 @propertycache
250 246 def _checklink(self):
251 247 return util.checklink(self._root)
252 248
253 249 @propertycache
254 250 def _checkexec(self):
255 251 return util.checkexec(self._root)
256 252
257 253 @propertycache
258 254 def _checkcase(self):
259 255 return not util.fscasesensitive(self._join('.hg'))
260 256
261 257 def _join(self, f):
262 258 # much faster than os.path.join()
263 259 # it's safe because f is always a relative path
264 260 return self._rootdir + f
265 261
266 262 def flagfunc(self, buildfallback):
267 263 if self._checklink and self._checkexec:
268 264 def f(x):
269 265 try:
270 266 st = os.lstat(self._join(x))
271 267 if util.statislink(st):
272 268 return 'l'
273 269 if util.statisexec(st):
274 270 return 'x'
275 271 except OSError:
276 272 pass
277 273 return ''
278 274 return f
279 275
280 276 fallback = buildfallback()
281 277 if self._checklink:
282 278 def f(x):
283 279 if os.path.islink(self._join(x)):
284 280 return 'l'
285 281 if 'x' in fallback(x):
286 282 return 'x'
287 283 return ''
288 284 return f
289 285 if self._checkexec:
290 286 def f(x):
291 287 if 'l' in fallback(x):
292 288 return 'l'
293 289 if util.isexec(self._join(x)):
294 290 return 'x'
295 291 return ''
296 292 return f
297 293 else:
298 294 return fallback
299 295
300 296 @propertycache
301 297 def _cwd(self):
298 # internal config: ui.forcecwd
299 forcecwd = self._ui.config('ui', 'forcecwd')
300 if forcecwd:
301 return forcecwd
302 302 return pycompat.getcwd()
303 303
304 304 def getcwd(self):
305 305 '''Return the path from which a canonical path is calculated.
306 306
307 307 This path should be used to resolve file patterns or to convert
308 308 canonical paths back to file paths for display. It shouldn't be
309 309 used to get real file paths. Use vfs functions instead.
310 310 '''
311 311 cwd = self._cwd
312 312 if cwd == self._root:
313 313 return ''
314 314 # self._root ends with a path separator if self._root is '/' or 'C:\'
315 315 rootsep = self._root
316 316 if not util.endswithsep(rootsep):
317 317 rootsep += pycompat.ossep
318 318 if cwd.startswith(rootsep):
319 319 return cwd[len(rootsep):]
320 320 else:
321 321 # we're outside the repo. return an absolute path.
322 322 return cwd
323 323
324 324 def pathto(self, f, cwd=None):
325 325 if cwd is None:
326 326 cwd = self.getcwd()
327 327 path = util.pathto(self._root, cwd, f)
328 328 if self._slash:
329 329 return util.pconvert(path)
330 330 return path
331 331
332 332 def __getitem__(self, key):
333 333 '''Return the current state of key (a filename) in the dirstate.
334 334
335 335 States are:
336 336 n normal
337 337 m needs merging
338 338 r marked for removal
339 339 a marked for addition
340 340 ? not tracked
341 341 '''
342 342 return self._map.get(key, ("?",))[0]
343 343
344 344 def __contains__(self, key):
345 345 return key in self._map
346 346
347 347 def __iter__(self):
348 348 for x in sorted(self._map):
349 349 yield x
350 350
351 351 def items(self):
352 352 return self._map.iteritems()
353 353
354 354 iteritems = items
355 355
356 356 def parents(self):
357 357 return [self._validate(p) for p in self._pl]
358 358
359 359 def p1(self):
360 360 return self._validate(self._pl[0])
361 361
362 362 def p2(self):
363 363 return self._validate(self._pl[1])
364 364
365 365 def branch(self):
366 366 return encoding.tolocal(self._branch)
367 367
368 368 def setparents(self, p1, p2=nullid):
369 369 """Set dirstate parents to p1 and p2.
370 370
371 371 When moving from two parents to one, 'm' merged entries are
372 372 adjusted to normal and previous copy records discarded and
373 373 returned by the call.
374 374
375 375 See localrepo.setparents()
376 376 """
377 377 if self._parentwriters == 0:
378 378 raise ValueError("cannot set dirstate parent without "
379 379 "calling dirstate.beginparentchange")
380 380
381 381 self._dirty = self._dirtypl = True
382 382 oldp2 = self._pl[1]
383 383 if self._origpl is None:
384 384 self._origpl = self._pl
385 385 self._pl = p1, p2
386 386 copies = {}
387 387 if oldp2 != nullid and p2 == nullid:
388 388 candidatefiles = self._nonnormalset.union(self._otherparentset)
389 389 for f in candidatefiles:
390 390 s = self._map.get(f)
391 391 if s is None:
392 392 continue
393 393
394 394 # Discard 'm' markers when moving away from a merge state
395 395 if s[0] == 'm':
396 396 if f in self._copymap:
397 397 copies[f] = self._copymap[f]
398 398 self.normallookup(f)
399 399 # Also fix up otherparent markers
400 400 elif s[0] == 'n' and s[2] == -2:
401 401 if f in self._copymap:
402 402 copies[f] = self._copymap[f]
403 403 self.add(f)
404 404 return copies
405 405
406 406 def setbranch(self, branch):
407 407 self._branch = encoding.fromlocal(branch)
408 408 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
409 409 try:
410 410 f.write(self._branch + '\n')
411 411 f.close()
412 412
413 413 # make sure filecache has the correct stat info for _branch after
414 414 # replacing the underlying file
415 415 ce = self._filecache['_branch']
416 416 if ce:
417 417 ce.refresh()
418 418 except: # re-raises
419 419 f.discard()
420 420 raise
421 421
422 422 def _opendirstatefile(self):
423 423 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
424 424 if self._pendingmode is not None and self._pendingmode != mode:
425 425 fp.close()
426 426 raise error.Abort(_('working directory state may be '
427 427 'changed parallelly'))
428 428 self._pendingmode = mode
429 429 return fp
430 430
431 431 def _read(self):
432 432 self._map = {}
433 433 self._copymap = {}
434 434 # ignore HG_PENDING because identity is used only for writing
435 435 self._identity = util.filestat.frompath(
436 436 self._opener.join(self._filename))
437 437 try:
438 438 fp = self._opendirstatefile()
439 439 try:
440 440 st = fp.read()
441 441 finally:
442 442 fp.close()
443 443 except IOError as err:
444 444 if err.errno != errno.ENOENT:
445 445 raise
446 446 return
447 447 if not st:
448 448 return
449 449
450 450 if util.safehasattr(parsers, 'dict_new_presized'):
451 451 # Make an estimate of the number of files in the dirstate based on
452 452 # its size. From a linear regression on a set of real-world repos,
453 453 # all over 10,000 files, the size of a dirstate entry is 85
454 454 # bytes. The cost of resizing is significantly higher than the cost
455 455 # of filling in a larger presized dict, so subtract 20% from the
456 456 # size.
457 457 #
458 458 # This heuristic is imperfect in many ways, so in a future dirstate
459 459 # format update it makes sense to just record the number of entries
460 460 # on write.
461 461 self._map = parsers.dict_new_presized(len(st) / 71)
462 462
463 463 # Python's garbage collector triggers a GC each time a certain number
464 464 # of container objects (the number being defined by
465 465 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
466 466 # for each file in the dirstate. The C version then immediately marks
467 467 # them as not to be tracked by the collector. However, this has no
468 468 # effect on when GCs are triggered, only on what objects the GC looks
469 469 # into. This means that O(number of files) GCs are unavoidable.
470 470 # Depending on when in the process's lifetime the dirstate is parsed,
471 471 # this can get very expensive. As a workaround, disable GC while
472 472 # parsing the dirstate.
473 473 #
474 474 # (we cannot decorate the function directly since it is in a C module)
475 475 parse_dirstate = util.nogc(parsers.parse_dirstate)
476 476 p = parse_dirstate(self._map, self._copymap, st)
477 477 if not self._dirtypl:
478 478 self._pl = p
479 479
480 480 def invalidate(self):
481 481 '''Causes the next access to reread the dirstate.
482 482
483 483 This is different from localrepo.invalidatedirstate() because it always
484 484 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
485 485 check whether the dirstate has changed before rereading it.'''
486 486
487 487 for a in ("_map", "_copymap", "_identity",
488 488 "_filefoldmap", "_dirfoldmap", "_branch",
489 489 "_pl", "_dirs", "_ignore", "_nonnormalset",
490 490 "_otherparentset"):
491 491 if a in self.__dict__:
492 492 delattr(self, a)
493 493 self._lastnormaltime = 0
494 494 self._dirty = False
495 495 self._updatedfiles.clear()
496 496 self._parentwriters = 0
497 497 self._origpl = None
498 498
499 499 def copy(self, source, dest):
500 500 """Mark dest as a copy of source. Unmark dest if source is None."""
501 501 if source == dest:
502 502 return
503 503 self._dirty = True
504 504 if source is not None:
505 505 self._copymap[dest] = source
506 506 self._updatedfiles.add(source)
507 507 self._updatedfiles.add(dest)
508 508 elif dest in self._copymap:
509 509 del self._copymap[dest]
510 510 self._updatedfiles.add(dest)
511 511
512 512 def copied(self, file):
513 513 return self._copymap.get(file, None)
514 514
515 515 def copies(self):
516 516 return self._copymap
517 517
518 518 def _droppath(self, f):
519 519 if self[f] not in "?r" and "_dirs" in self.__dict__:
520 520 self._dirs.delpath(f)
521 521
522 522 if "_filefoldmap" in self.__dict__:
523 523 normed = util.normcase(f)
524 524 if normed in self._filefoldmap:
525 525 del self._filefoldmap[normed]
526 526
527 527 self._updatedfiles.add(f)
528 528
529 529 def _addpath(self, f, state, mode, size, mtime):
530 530 oldstate = self[f]
531 531 if state == 'a' or oldstate == 'r':
532 532 scmutil.checkfilename(f)
533 533 if f in self._dirs:
534 534 raise error.Abort(_('directory %r already in dirstate') % f)
535 535 # shadows
536 536 for d in util.finddirs(f):
537 537 if d in self._dirs:
538 538 break
539 539 if d in self._map and self[d] != 'r':
540 540 raise error.Abort(
541 541 _('file %r in dirstate clashes with %r') % (d, f))
542 542 if oldstate in "?r" and "_dirs" in self.__dict__:
543 543 self._dirs.addpath(f)
544 544 self._dirty = True
545 545 self._updatedfiles.add(f)
546 546 self._map[f] = dirstatetuple(state, mode, size, mtime)
547 547 if state != 'n' or mtime == -1:
548 548 self._nonnormalset.add(f)
549 549 if size == -2:
550 550 self._otherparentset.add(f)
551 551
552 552 def normal(self, f):
553 553 '''Mark a file normal and clean.'''
554 554 s = os.lstat(self._join(f))
555 555 mtime = s.st_mtime
556 556 self._addpath(f, 'n', s.st_mode,
557 557 s.st_size & _rangemask, mtime & _rangemask)
558 558 if f in self._copymap:
559 559 del self._copymap[f]
560 560 if f in self._nonnormalset:
561 561 self._nonnormalset.remove(f)
562 562 if mtime > self._lastnormaltime:
563 563 # Remember the most recent modification timeslot for status(),
564 564 # to make sure we won't miss future size-preserving file content
565 565 # modifications that happen within the same timeslot.
566 566 self._lastnormaltime = mtime
567 567
568 568 def normallookup(self, f):
569 569 '''Mark a file normal, but possibly dirty.'''
570 570 if self._pl[1] != nullid and f in self._map:
571 571 # if there is a merge going on and the file was either
572 572 # in state 'm' (-1) or coming from other parent (-2) before
573 573 # being removed, restore that state.
574 574 entry = self._map[f]
575 575 if entry[0] == 'r' and entry[2] in (-1, -2):
576 576 source = self._copymap.get(f)
577 577 if entry[2] == -1:
578 578 self.merge(f)
579 579 elif entry[2] == -2:
580 580 self.otherparent(f)
581 581 if source:
582 582 self.copy(source, f)
583 583 return
584 584 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
585 585 return
586 586 self._addpath(f, 'n', 0, -1, -1)
587 587 if f in self._copymap:
588 588 del self._copymap[f]
589 589 if f in self._nonnormalset:
590 590 self._nonnormalset.remove(f)
591 591
592 592 def otherparent(self, f):
593 593 '''Mark as coming from the other parent, always dirty.'''
594 594 if self._pl[1] == nullid:
595 595 raise error.Abort(_("setting %r to other parent "
596 596 "only allowed in merges") % f)
597 597 if f in self and self[f] == 'n':
598 598 # merge-like
599 599 self._addpath(f, 'm', 0, -2, -1)
600 600 else:
601 601 # add-like
602 602 self._addpath(f, 'n', 0, -2, -1)
603 603
604 604 if f in self._copymap:
605 605 del self._copymap[f]
606 606
607 607 def add(self, f):
608 608 '''Mark a file added.'''
609 609 self._addpath(f, 'a', 0, -1, -1)
610 610 if f in self._copymap:
611 611 del self._copymap[f]
612 612
613 613 def remove(self, f):
614 614 '''Mark a file removed.'''
615 615 self._dirty = True
616 616 self._droppath(f)
617 617 size = 0
618 618 if self._pl[1] != nullid and f in self._map:
619 619 # backup the previous state
620 620 entry = self._map[f]
621 621 if entry[0] == 'm': # merge
622 622 size = -1
623 623 elif entry[0] == 'n' and entry[2] == -2: # other parent
624 624 size = -2
625 625 self._otherparentset.add(f)
626 626 self._map[f] = dirstatetuple('r', 0, size, 0)
627 627 self._nonnormalset.add(f)
628 628 if size == 0 and f in self._copymap:
629 629 del self._copymap[f]
630 630
631 631 def merge(self, f):
632 632 '''Mark a file merged.'''
633 633 if self._pl[1] == nullid:
634 634 return self.normallookup(f)
635 635 return self.otherparent(f)
636 636
637 637 def drop(self, f):
638 638 '''Drop a file from the dirstate'''
639 639 if f in self._map:
640 640 self._dirty = True
641 641 self._droppath(f)
642 642 del self._map[f]
643 643 if f in self._nonnormalset:
644 644 self._nonnormalset.remove(f)
645 645 if f in self._copymap:
646 646 del self._copymap[f]
647 647
648 648 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
649 649 if exists is None:
650 650 exists = os.path.lexists(os.path.join(self._root, path))
651 651 if not exists:
652 652 # Maybe a path component exists
653 653 if not ignoremissing and '/' in path:
654 654 d, f = path.rsplit('/', 1)
655 655 d = self._normalize(d, False, ignoremissing, None)
656 656 folded = d + "/" + f
657 657 else:
658 658 # No path components, preserve original case
659 659 folded = path
660 660 else:
661 661 # recursively normalize leading directory components
662 662 # against dirstate
663 663 if '/' in normed:
664 664 d, f = normed.rsplit('/', 1)
665 665 d = self._normalize(d, False, ignoremissing, True)
666 666 r = self._root + "/" + d
667 667 folded = d + "/" + util.fspath(f, r)
668 668 else:
669 669 folded = util.fspath(normed, self._root)
670 670 storemap[normed] = folded
671 671
672 672 return folded
673 673
674 674 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
675 675 normed = util.normcase(path)
676 676 folded = self._filefoldmap.get(normed, None)
677 677 if folded is None:
678 678 if isknown:
679 679 folded = path
680 680 else:
681 681 folded = self._discoverpath(path, normed, ignoremissing, exists,
682 682 self._filefoldmap)
683 683 return folded
684 684
685 685 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
686 686 normed = util.normcase(path)
687 687 folded = self._filefoldmap.get(normed, None)
688 688 if folded is None:
689 689 folded = self._dirfoldmap.get(normed, None)
690 690 if folded is None:
691 691 if isknown:
692 692 folded = path
693 693 else:
694 694 # store discovered result in dirfoldmap so that future
695 695 # normalizefile calls don't start matching directories
696 696 folded = self._discoverpath(path, normed, ignoremissing, exists,
697 697 self._dirfoldmap)
698 698 return folded
699 699
700 700 def normalize(self, path, isknown=False, ignoremissing=False):
701 701 '''
702 702 normalize the case of a pathname when on a casefolding filesystem
703 703
704 704 isknown specifies whether the filename came from walking the
705 705 disk, to avoid extra filesystem access.
706 706
707 707 If ignoremissing is True, missing paths are returned
708 708 unchanged. Otherwise, we try harder to normalize possibly
709 709 existing path components.
710 710
711 711 The normalized case is determined based on the following precedence:
712 712
713 713 - version of name already stored in the dirstate
714 714 - version of name stored on disk
715 715 - version provided via command arguments
716 716 '''
717 717
718 718 if self._checkcase:
719 719 return self._normalize(path, isknown, ignoremissing)
720 720 return path
721 721
722 722 def clear(self):
723 723 self._map = {}
724 724 self._nonnormalset = set()
725 725 self._otherparentset = set()
726 726 if "_dirs" in self.__dict__:
727 727 delattr(self, "_dirs")
728 728 self._copymap = {}
729 729 self._pl = [nullid, nullid]
730 730 self._lastnormaltime = 0
731 731 self._updatedfiles.clear()
732 732 self._dirty = True
733 733
734 734 def rebuild(self, parent, allfiles, changedfiles=None):
735 735 if changedfiles is None:
736 736 # Rebuild entire dirstate
737 737 changedfiles = allfiles
738 738 lastnormaltime = self._lastnormaltime
739 739 self.clear()
740 740 self._lastnormaltime = lastnormaltime
741 741
742 742 if self._origpl is None:
743 743 self._origpl = self._pl
744 744 self._pl = (parent, nullid)
745 745 for f in changedfiles:
746 746 if f in allfiles:
747 747 self.normallookup(f)
748 748 else:
749 749 self.drop(f)
750 750
751 751 self._dirty = True
752 752
753 753 def identity(self):
754 754 '''Return identity of dirstate itself to detect changing in storage
755 755
756 756 If identity of previous dirstate is equal to this, writing
757 757 changes based on the former dirstate out can keep consistency.
758 758 '''
759 759 return self._identity
760 760
761 761 def write(self, tr):
762 762 if not self._dirty:
763 763 return
764 764
765 765 filename = self._filename
766 766 if tr:
767 767 # 'dirstate.write()' is not only for writing in-memory
768 768 # changes out, but also for dropping ambiguous timestamps.
769 769 # delayed writing would re-raise the "ambiguous timestamp issue".
770 770 # See also the wiki page below for detail:
771 771 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
772 772
773 773 # emulate dropping timestamp in 'parsers.pack_dirstate'
774 774 now = _getfsnow(self._opener)
775 775 dmap = self._map
776 776 for f in self._updatedfiles:
777 777 e = dmap.get(f)
778 778 if e is not None and e[0] == 'n' and e[3] == now:
779 779 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
780 780 self._nonnormalset.add(f)
781 781
782 782 # emulate that all 'dirstate.normal' results are written out
783 783 self._lastnormaltime = 0
784 784 self._updatedfiles.clear()
785 785
786 786 # delay writing in-memory changes out
787 787 tr.addfilegenerator('dirstate', (self._filename,),
788 788 self._writedirstate, location='plain')
789 789 return
790 790
791 791 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
792 792 self._writedirstate(st)
793 793
794 794 def addparentchangecallback(self, category, callback):
795 795 """add a callback to be called when the wd parents are changed
796 796
797 797 Callback will be called with the following arguments:
798 798 dirstate, (oldp1, oldp2), (newp1, newp2)
799 799
800 800 Category is a unique identifier to allow overwriting an old callback
801 801 with a newer callback.
802 802 """
803 803 self._plchangecallbacks[category] = callback
804 804
805 805 def _writedirstate(self, st):
806 806 # notify callbacks about parents change
807 807 if self._origpl is not None and self._origpl != self._pl:
808 808 for c, callback in sorted(self._plchangecallbacks.iteritems()):
809 809 callback(self, self._origpl, self._pl)
810 810 self._origpl = None
811 811 # use the modification time of the newly created temporary file as the
812 812 # filesystem's notion of 'now'
813 813 now = util.fstat(st).st_mtime & _rangemask
814 814
815 815 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
816 816 # the timestamp of each entry in dirstate, because of 'now > mtime'
817 817 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
818 818 if delaywrite > 0:
819 819 # do we have any files to delay for?
820 820 for f, e in self._map.iteritems():
821 821 if e[0] == 'n' and e[3] == now:
822 822 import time # to avoid useless import
823 823 # rather than sleep n seconds, sleep until the next
824 824 # multiple of n seconds
825 825 clock = time.time()
826 826 start = int(clock) - (int(clock) % delaywrite)
827 827 end = start + delaywrite
828 828 time.sleep(end - clock)
829 829 now = end # trust our estimate that the end is near now
830 830 break
831 831
832 832 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
833 833 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
834 834 st.close()
835 835 self._lastnormaltime = 0
836 836 self._dirty = self._dirtypl = False
837 837
838 838 def _dirignore(self, f):
839 839 if f == '.':
840 840 return False
841 841 if self._ignore(f):
842 842 return True
843 843 for p in util.finddirs(f):
844 844 if self._ignore(p):
845 845 return True
846 846 return False
847 847
848 848 def _ignorefiles(self):
849 849 files = []
850 850 if os.path.exists(self._join('.hgignore')):
851 851 files.append(self._join('.hgignore'))
852 852 for name, path in self._ui.configitems("ui"):
853 853 if name == 'ignore' or name.startswith('ignore.'):
854 854 # we need to use os.path.join here rather than self._join
855 855 # because path is arbitrary and user-specified
856 856 files.append(os.path.join(self._rootdir, util.expandpath(path)))
857 857 return files
858 858
859 859 def _ignorefileandline(self, f):
860 860 files = collections.deque(self._ignorefiles())
861 861 visited = set()
862 862 while files:
863 863 i = files.popleft()
864 864 patterns = matchmod.readpatternfile(i, self._ui.warn,
865 865 sourceinfo=True)
866 866 for pattern, lineno, line in patterns:
867 867 kind, p = matchmod._patsplit(pattern, 'glob')
868 868 if kind == "subinclude":
869 869 if p not in visited:
870 870 files.append(p)
871 871 continue
872 872 m = matchmod.match(self._root, '', [], [pattern],
873 873 warn=self._ui.warn)
874 874 if m(f):
875 875 return (i, lineno, line)
876 876 visited.add(i)
877 877 return (None, -1, "")
878 878
879 879 def _walkexplicit(self, match, subrepos):
880 880 '''Get stat data about the files explicitly specified by match.
881 881
882 882 Return a triple (results, dirsfound, dirsnotfound).
883 883 - results is a mapping from filename to stat result. It also contains
884 884 listings mapping subrepos and .hg to None.
885 885 - dirsfound is a list of files found to be directories.
886 886 - dirsnotfound is a list of files that the dirstate thinks are
887 887 directories and that were not found.'''
888 888
889 889 def badtype(mode):
890 890 kind = _('unknown')
891 891 if stat.S_ISCHR(mode):
892 892 kind = _('character device')
893 893 elif stat.S_ISBLK(mode):
894 894 kind = _('block device')
895 895 elif stat.S_ISFIFO(mode):
896 896 kind = _('fifo')
897 897 elif stat.S_ISSOCK(mode):
898 898 kind = _('socket')
899 899 elif stat.S_ISDIR(mode):
900 900 kind = _('directory')
901 901 return _('unsupported file type (type is %s)') % kind
902 902
903 903 matchedir = match.explicitdir
904 904 badfn = match.bad
905 905 dmap = self._map
906 906 lstat = os.lstat
907 907 getkind = stat.S_IFMT
908 908 dirkind = stat.S_IFDIR
909 909 regkind = stat.S_IFREG
910 910 lnkkind = stat.S_IFLNK
911 911 join = self._join
912 912 dirsfound = []
913 913 foundadd = dirsfound.append
914 914 dirsnotfound = []
915 915 notfoundadd = dirsnotfound.append
916 916
917 917 if not match.isexact() and self._checkcase:
918 918 normalize = self._normalize
919 919 else:
920 920 normalize = None
921 921
922 922 files = sorted(match.files())
923 923 subrepos.sort()
924 924 i, j = 0, 0
925 925 while i < len(files) and j < len(subrepos):
926 926 subpath = subrepos[j] + "/"
927 927 if files[i] < subpath:
928 928 i += 1
929 929 continue
930 930 while i < len(files) and files[i].startswith(subpath):
931 931 del files[i]
932 932 j += 1
933 933
934 934 if not files or '.' in files:
935 935 files = ['.']
936 936 results = dict.fromkeys(subrepos)
937 937 results['.hg'] = None
938 938
939 939 alldirs = None
940 940 for ff in files:
941 941 # constructing the foldmap is expensive, so don't do it for the
942 942 # common case where files is ['.']
943 943 if normalize and ff != '.':
944 944 nf = normalize(ff, False, True)
945 945 else:
946 946 nf = ff
947 947 if nf in results:
948 948 continue
949 949
950 950 try:
951 951 st = lstat(join(nf))
952 952 kind = getkind(st.st_mode)
953 953 if kind == dirkind:
954 954 if nf in dmap:
955 955 # file replaced by dir on disk but still in dirstate
956 956 results[nf] = None
957 957 if matchedir:
958 958 matchedir(nf)
959 959 foundadd((nf, ff))
960 960 elif kind == regkind or kind == lnkkind:
961 961 results[nf] = st
962 962 else:
963 963 badfn(ff, badtype(kind))
964 964 if nf in dmap:
965 965 results[nf] = None
966 966 except OSError as inst: # nf not found on disk - it is dirstate only
967 967 if nf in dmap: # does it exactly match a missing file?
968 968 results[nf] = None
969 969 else: # does it match a missing directory?
970 970 if alldirs is None:
971 971 alldirs = util.dirs(dmap)
972 972 if nf in alldirs:
973 973 if matchedir:
974 974 matchedir(nf)
975 975 notfoundadd(nf)
976 976 else:
977 977 badfn(ff, inst.strerror)
978 978
979 979 # Case insensitive filesystems cannot rely on lstat() failing to detect
980 980 # a case-only rename. Prune the stat object for any file that does not
981 981 # match the case in the filesystem, if there are multiple files that
982 982 # normalize to the same path.
983 983 if match.isexact() and self._checkcase:
984 984 normed = {}
985 985
986 986 for f, st in results.iteritems():
987 987 if st is None:
988 988 continue
989 989
990 990 nc = util.normcase(f)
991 991 paths = normed.get(nc)
992 992
993 993 if paths is None:
994 994 paths = set()
995 995 normed[nc] = paths
996 996
997 997 paths.add(f)
998 998
999 999 for norm, paths in normed.iteritems():
1000 1000 if len(paths) > 1:
1001 1001 for path in paths:
1002 1002 folded = self._discoverpath(path, norm, True, None,
1003 1003 self._dirfoldmap)
1004 1004 if path != folded:
1005 1005 results[path] = None
1006 1006
1007 1007 return results, dirsfound, dirsnotfound
1008 1008
1009 1009 def walk(self, match, subrepos, unknown, ignored, full=True):
1010 1010 '''
1011 1011 Walk recursively through the directory tree, finding all files
1012 1012 matched by match.
1013 1013
1014 1014 If full is False, maybe skip some known-clean files.
1015 1015
1016 1016 Return a dict mapping filename to stat-like object (either
1017 1017 mercurial.osutil.stat instance or return value of os.stat()).
1018 1018
1019 1019 '''
1020 1020 # full is a flag that extensions that hook into walk can use -- this
1021 1021 # implementation doesn't use it at all. This satisfies the contract
1022 1022 # because we only guarantee a "maybe".
1023 1023
1024 1024 if ignored:
1025 1025 ignore = util.never
1026 1026 dirignore = util.never
1027 1027 elif unknown:
1028 1028 ignore = self._ignore
1029 1029 dirignore = self._dirignore
1030 1030 else:
1031 1031 # if not unknown and not ignored, drop dir recursion and step 2
1032 1032 ignore = util.always
1033 1033 dirignore = util.always
1034 1034
1035 1035 matchfn = match.matchfn
1036 1036 matchalways = match.always()
1037 1037 matchtdir = match.traversedir
1038 1038 dmap = self._map
1039 1039 listdir = util.listdir
1040 1040 lstat = os.lstat
1041 1041 dirkind = stat.S_IFDIR
1042 1042 regkind = stat.S_IFREG
1043 1043 lnkkind = stat.S_IFLNK
1044 1044 join = self._join
1045 1045
1046 1046 exact = skipstep3 = False
1047 1047 if match.isexact(): # match.exact
1048 1048 exact = True
1049 1049 dirignore = util.always # skip step 2
1050 1050 elif match.prefix(): # match.match, no patterns
1051 1051 skipstep3 = True
1052 1052
1053 1053 if not exact and self._checkcase:
1054 1054 normalize = self._normalize
1055 1055 normalizefile = self._normalizefile
1056 1056 skipstep3 = False
1057 1057 else:
1058 1058 normalize = self._normalize
1059 1059 normalizefile = None
1060 1060
1061 1061 # step 1: find all explicit files
1062 1062 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1063 1063
1064 1064 skipstep3 = skipstep3 and not (work or dirsnotfound)
1065 1065 work = [d for d in work if not dirignore(d[0])]
1066 1066
1067 1067 # step 2: visit subdirectories
1068 1068 def traverse(work, alreadynormed):
1069 1069 wadd = work.append
1070 1070 while work:
1071 1071 nd = work.pop()
1072 1072 if not match.visitdir(nd):
1073 1073 continue
1074 1074 skip = None
1075 1075 if nd == '.':
1076 1076 nd = ''
1077 1077 else:
1078 1078 skip = '.hg'
1079 1079 try:
1080 1080 entries = listdir(join(nd), stat=True, skip=skip)
1081 1081 except OSError as inst:
1082 1082 if inst.errno in (errno.EACCES, errno.ENOENT):
1083 1083 match.bad(self.pathto(nd), inst.strerror)
1084 1084 continue
1085 1085 raise
1086 1086 for f, kind, st in entries:
1087 1087 if normalizefile:
1088 1088 # even though f might be a directory, we're only
1089 1089 # interested in comparing it to files currently in the
1090 1090 # dmap -- therefore normalizefile is enough
1091 1091 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1092 1092 True)
1093 1093 else:
1094 1094 nf = nd and (nd + "/" + f) or f
1095 1095 if nf not in results:
1096 1096 if kind == dirkind:
1097 1097 if not ignore(nf):
1098 1098 if matchtdir:
1099 1099 matchtdir(nf)
1100 1100 wadd(nf)
1101 1101 if nf in dmap and (matchalways or matchfn(nf)):
1102 1102 results[nf] = None
1103 1103 elif kind == regkind or kind == lnkkind:
1104 1104 if nf in dmap:
1105 1105 if matchalways or matchfn(nf):
1106 1106 results[nf] = st
1107 1107 elif ((matchalways or matchfn(nf))
1108 1108 and not ignore(nf)):
1109 1109 # unknown file -- normalize if necessary
1110 1110 if not alreadynormed:
1111 1111 nf = normalize(nf, False, True)
1112 1112 results[nf] = st
1113 1113 elif nf in dmap and (matchalways or matchfn(nf)):
1114 1114 results[nf] = None
1115 1115
1116 1116 for nd, d in work:
1117 1117 # alreadynormed means that processwork doesn't have to do any
1118 1118 # expensive directory normalization
1119 1119 alreadynormed = not normalize or nd == d
1120 1120 traverse([d], alreadynormed)
1121 1121
1122 1122 for s in subrepos:
1123 1123 del results[s]
1124 1124 del results['.hg']
1125 1125
1126 1126 # step 3: visit remaining files from dmap
1127 1127 if not skipstep3 and not exact:
1128 1128 # If a dmap file is not in results yet, it was either
1129 1129 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1130 1130 # symlink directory.
1131 1131 if not results and matchalways:
1132 1132 visit = [f for f in dmap]
1133 1133 else:
1134 1134 visit = [f for f in dmap if f not in results and matchfn(f)]
1135 1135 visit.sort()
1136 1136
1137 1137 if unknown:
1138 1138 # unknown == True means we walked all dirs under the roots
1139 1139 # that weren't ignored, and everything that matched was stat'ed
1140 1140 # and is already in results.
1141 1141 # The rest must thus be ignored or under a symlink.
1142 1142 audit_path = pathutil.pathauditor(self._root)
1143 1143
1144 1144 for nf in iter(visit):
1145 1145 # If a stat for the same file was already added with a
1146 1146 # different case, don't add one for this, since that would
1147 1147 # make it appear as if the file exists under both names
1148 1148 # on disk.
1149 1149 if (normalizefile and
1150 1150 normalizefile(nf, True, True) in results):
1151 1151 results[nf] = None
1152 1152 # Report ignored items in the dmap as long as they are not
1153 1153 # under a symlink directory.
1154 1154 elif audit_path.check(nf):
1155 1155 try:
1156 1156 results[nf] = lstat(join(nf))
1157 1157 # file was just ignored, no links, and exists
1158 1158 except OSError:
1159 1159 # file doesn't exist
1160 1160 results[nf] = None
1161 1161 else:
1162 1162 # It's either missing or under a symlink directory
1163 1163 # which we in this case report as missing
1164 1164 results[nf] = None
1165 1165 else:
1166 1166 # We may not have walked the full directory tree above,
1167 1167 # so stat and check everything we missed.
1168 1168 iv = iter(visit)
1169 1169 for st in util.statfiles([join(i) for i in visit]):
1170 1170 results[next(iv)] = st
1171 1171 return results
1172 1172
1173 1173 def status(self, match, subrepos, ignored, clean, unknown):
1174 1174 '''Determine the status of the working copy relative to the
1175 1175 dirstate and return a pair of (unsure, status), where status is of type
1176 1176 scmutil.status and:
1177 1177
1178 1178 unsure:
1179 1179 files that might have been modified since the dirstate was
1180 1180 written, but need to be read to be sure (size is the same
1181 1181 but mtime differs)
1182 1182 status.modified:
1183 1183 files that have definitely been modified since the dirstate
1184 1184 was written (different size or mode)
1185 1185 status.clean:
1186 1186 files that have definitely not been modified since the
1187 1187 dirstate was written
1188 1188 '''
1189 1189 listignored, listclean, listunknown = ignored, clean, unknown
1190 1190 lookup, modified, added, unknown, ignored = [], [], [], [], []
1191 1191 removed, deleted, clean = [], [], []
1192 1192
1193 1193 dmap = self._map
1194 1194 ladd = lookup.append # aka "unsure"
1195 1195 madd = modified.append
1196 1196 aadd = added.append
1197 1197 uadd = unknown.append
1198 1198 iadd = ignored.append
1199 1199 radd = removed.append
1200 1200 dadd = deleted.append
1201 1201 cadd = clean.append
1202 1202 mexact = match.exact
1203 1203 dirignore = self._dirignore
1204 1204 checkexec = self._checkexec
1205 1205 copymap = self._copymap
1206 1206 lastnormaltime = self._lastnormaltime
1207 1207
1208 1208 # We need to do full walks when either
1209 1209 # - we're listing all clean files, or
1210 1210 # - match.traversedir does something, because match.traversedir should
1211 1211 # be called for every dir in the working dir
1212 1212 full = listclean or match.traversedir is not None
1213 1213 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1214 1214 full=full).iteritems():
1215 1215 if fn not in dmap:
1216 1216 if (listignored or mexact(fn)) and dirignore(fn):
1217 1217 if listignored:
1218 1218 iadd(fn)
1219 1219 else:
1220 1220 uadd(fn)
1221 1221 continue
1222 1222
1223 1223 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1224 1224 # written like that for performance reasons. dmap[fn] is not a
1225 1225 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1226 1226 # opcode has fast paths when the value to be unpacked is a tuple or
1227 1227 # a list, but falls back to creating a full-fledged iterator in
1228 1228 # general. That is much slower than simply accessing and storing the
1229 1229 # tuple members one by one.
1230 1230 t = dmap[fn]
1231 1231 state = t[0]
1232 1232 mode = t[1]
1233 1233 size = t[2]
1234 1234 time = t[3]
1235 1235
1236 1236 if not st and state in "nma":
1237 1237 dadd(fn)
1238 1238 elif state == 'n':
1239 1239 if (size >= 0 and
1240 1240 ((size != st.st_size and size != st.st_size & _rangemask)
1241 1241 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1242 1242 or size == -2 # other parent
1243 1243 or fn in copymap):
1244 1244 madd(fn)
1245 1245 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1246 1246 ladd(fn)
1247 1247 elif st.st_mtime == lastnormaltime:
1248 1248 # fn may have just been marked as normal and it may have
1249 1249 # changed in the same second without changing its size.
1250 1250 # This can happen if we quickly do multiple commits.
1251 1251 # Force lookup, so we don't miss such a racy file change.
1252 1252 ladd(fn)
1253 1253 elif listclean:
1254 1254 cadd(fn)
1255 1255 elif state == 'm':
1256 1256 madd(fn)
1257 1257 elif state == 'a':
1258 1258 aadd(fn)
1259 1259 elif state == 'r':
1260 1260 radd(fn)
1261 1261
1262 1262 return (lookup, scmutil.status(modified, added, removed, deleted,
1263 1263 unknown, ignored, clean))
1264 1264
1265 1265 def matches(self, match):
1266 1266 '''
1267 1267 return files in the dirstate (in whatever state) filtered by match
1268 1268 '''
1269 1269 dmap = self._map
1270 1270 if match.always():
1271 1271 return dmap.keys()
1272 1272 files = match.files()
1273 1273 if match.isexact():
1274 1274 # fast path -- filter the other way around, since typically files is
1275 1275 # much smaller than dmap
1276 1276 return [f for f in files if f in dmap]
1277 1277 if match.prefix() and all(fn in dmap for fn in files):
1278 1278 # fast path -- all the values are known to be files, so just return
1279 1279 # that
1280 1280 return list(files)
1281 1281 return [f for f in dmap if match(f)]
1282 1282
1283 1283 def _actualfilename(self, tr):
1284 1284 if tr:
1285 1285 return self._pendingfilename
1286 1286 else:
1287 1287 return self._filename
1288 1288
1289 1289 def savebackup(self, tr, suffix='', prefix=''):
1290 1290 '''Save current dirstate into backup file with suffix'''
1291 1291 assert len(suffix) > 0 or len(prefix) > 0
1292 1292 filename = self._actualfilename(tr)
1293 1293
1294 1294 # use '_writedirstate' instead of 'write' to ensure changes are written out,
1295 1295 # because the latter omits writing out if a transaction is running.
1296 1296 # the output file will be used to create a backup of dirstate at this point.
1297 1297 if self._dirty or not self._opener.exists(filename):
1298 1298 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1299 1299 checkambig=True))
1300 1300
1301 1301 if tr:
1302 1302 # ensure that subsequent tr.writepending returns True for
1303 1303 # changes written out above, even if dirstate is never
1304 1304 # changed after this
1305 1305 tr.addfilegenerator('dirstate', (self._filename,),
1306 1306 self._writedirstate, location='plain')
1307 1307
1308 1308 # ensure that pending file written above is unlinked at
1309 1309 # failure, even if tr.writepending isn't invoked until the
1310 1310 # end of this transaction
1311 1311 tr.registertmp(filename, location='plain')
1312 1312
1313 1313 backupname = prefix + self._filename + suffix
1314 1314 assert backupname != filename
1315 1315 self._opener.tryunlink(backupname)
1316 1316 # hardlink backup is okay because _writedirstate is always called
1317 1317 # with an "atomictemp=True" file.
1318 1318 util.copyfile(self._opener.join(filename),
1319 1319 self._opener.join(backupname), hardlink=True)
1320 1320
1321 1321 def restorebackup(self, tr, suffix='', prefix=''):
1322 1322 '''Restore dirstate by backup file with suffix'''
1323 1323 assert len(suffix) > 0 or len(prefix) > 0
1324 1324 # this "invalidate()" prevents "wlock.release()" from writing
1325 1325 # changes of dirstate out after restoring from backup file
1326 1326 self.invalidate()
1327 1327 filename = self._actualfilename(tr)
1328 1328 # using self._filename to avoid having "pending" in the backup filename
1329 1329 self._opener.rename(prefix + self._filename + suffix, filename,
1330 1330 checkambig=True)
1331 1331
1332 1332 def clearbackup(self, tr, suffix='', prefix=''):
1333 1333 '''Clear backup file with suffix'''
1334 1334 assert len(suffix) > 0 or len(prefix) > 0
1335 1335 # using self._filename to avoid having "pending" in the backup filename
1336 1336 self._opener.unlink(prefix + self._filename + suffix)
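
As a closing note on the main consumer of _cwd: getcwd() in the hunk above reduces the cached working directory to a path relative to the repository root. Below is a hedged, simplified re-expression of that logic; POSIX-style separators are assumed and repocwd is an illustrative name, not a Mercurial API.

def repocwd(cwd, root, sep='/'):
    # sketch of dirstate.getcwd(): '' at the repository root, a
    # root-relative path inside the repository, and the absolute
    # cwd unchanged when outside it
    if cwd == root:
        return ''
    rootsep = root if root.endswith(sep) else root + sep
    if cwd.startswith(rootsep):
        return cwd[len(rootsep):]
    return cwd

assert repocwd('/repo', '/repo') == ''
assert repocwd('/repo/src/module', '/repo') == 'src/module'
assert repocwd('/somewhere/else', '/repo') == '/somewhere/else'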