# -- review-site scrape header (converted to comments) --
# changeset: r34674:e2214632 (branch: default)
# author:    Durham Goode
# summary:   dirstate: move write into dirstatemap
# diff hunk: @@ -1,1403 +1,1407 @@ of mercurial/dirstate.py
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
class repocache(filecache):
    """filecache for files in .hg/

    Resolves cached filenames through the owner's metadata opener
    (``obj._opener``), i.e. relative to the .hg directory.
    """
    def join(self, obj, fname):
        return obj._opener.join(fname)
class rootcache(filecache):
    """filecache for files in the repository root

    Resolves cached filenames through ``obj._join``, i.e. relative to
    the working directory root.
    """
    def join(self, obj, fname):
        return obj._join(fname)
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem

    Creates a temporary file through *vfs* and returns its st_mtime, so
    the value reflects the filesystem's own clock rather than the system
    clock.  The temporary file is always closed and removed.
    '''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd).st_mtime
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)
57 57 class dirstate(object):
58 58
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is applied to parent nodes when they are read back (see
        parents()); sparsematchfn returns the matcher for the sparse
        checkout (see _sparsematcher).
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False            # in-memory state differs from disk
        self._lastnormaltime = 0       # most recent mtime seen by normal()
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0        # nesting depth of parentchange()
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}   # category -> parents-change callback
        self._origpl = None            # parents before the pending change
        self._updatedfiles = set()     # files touched since the last write
83 83
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Nestable: each entry increments _parentwriters, and only a
        normal exit decrements it again.
        '''
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
100 100
    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.

        Deprecated since 4.3: use the parentchange() context manager.
        '''
        self._ui.deprecwarn('beginparentchange is obsoleted by the '
                            'parentchange context manager.', '4.3')
        self._parentwriters += 1
111 111
    def endparentchange(self):
        '''Marks the end of a set of changes that involve changing the
        dirstate parents. Once all parent changes have been marked done,
        the wlock will be free to write the dirstate on release.

        Deprecated since 4.3: use the parentchange() context manager.
        '''
        self._ui.deprecwarn('endparentchange is obsoleted by the '
                            'parentchange context manager.', '4.3')
        # guard against going negative on unbalanced calls
        if self._parentwriters > 0:
            self._parentwriters -= 1
121 121
122 122 def pendingparentchange(self):
123 123 '''Returns true if the dirstate is in the middle of a set of changes
124 124 that modify the dirstate parent.
125 125 '''
126 126 return self._parentwriters > 0
127 127
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).

        Lazily populated: the first access triggers _read(), which
        replaces this attribute with the real dirstatemap instance.
        '''
        self._read()
        return self._map
134 134
    @propertycache
    def _identity(self):
        # stat-based identity of the dirstate file at read time; used by
        # identity() to detect concurrent rewrites of the storage
        self._read()
        return self._identity
139 139
    @propertycache
    def _nonnormalset(self):
        # files whose state is not plain 'n'; computing it also yields the
        # other-parent set, so cache that sibling attribute as a side effect
        nonnorm, otherparents = self._map.nonnormalentries()
        self._otherparentset = otherparents
        return nonnorm
145 145
    @propertycache
    def _otherparentset(self):
        # files coming from the second parent; computing it also yields the
        # non-normal set, so cache that sibling attribute as a side effect
        nonnorm, otherparents = self._map.nonnormalentries()
        self._nonnormalset = nonnorm
        return otherparents
151 151
    @propertycache
    def _filefoldmap(self):
        # case-folded filename -> dirstate-case filename (delegated to map)
        return self._map.filefoldmap()
155 155
156 156 @propertycache
157 157 def _dirfoldmap(self):
158 158 f = {}
159 159 normcase = util.normcase
160 160 for name in self._dirs:
161 161 f[normcase(name)] = name
162 162 return f
163 163
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        Resolved via self._sparsematchfn on every access.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
176 176
    @repocache('branch')
    def _branch(self):
        # current branch name read from .hg/branch; a missing file or an
        # empty file both mean the "default" branch
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"
185 185
    @property
    def _pl(self):
        # raw (p1, p2) parents pair, delegated to the dirstatemap
        return self._map.parents()
189 189
    @propertycache
    def _dirs(self):
        # directory multiset of tracked files, delegated to the dirstatemap
        return self._map.dirs()
193 193
    def dirs(self):
        # public accessor for the (cached) directory structure
        return self._dirs
196 196
    @rootcache('.hgignore')
    def _ignore(self):
        # combined ignore matcher built from all ignore files; with no
        # ignore files at all, nothing is ever ignored
        files = self._ignorefiles()
        if not files:
            return matchmod.never(self._root, '')

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
205 205
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' although the native
        # separator differs (ui.slash on Windows)
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
209 209
    @propertycache
    def _checklink(self):
        # does the filesystem under the repo root support symlinks?
        return util.checklink(self._root)
213 213
    @propertycache
    def _checkexec(self):
        # does the filesystem under the repo root support the exec bit?
        return util.checkexec(self._root)
217 217
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against .hg)
        return not util.fscasesensitive(self._join('.hg'))
221 221
    def _join(self, f):
        """Return the absolute path of repo-relative path *f*."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
226 226
227 227 def flagfunc(self, buildfallback):
228 228 if self._checklink and self._checkexec:
229 229 def f(x):
230 230 try:
231 231 st = os.lstat(self._join(x))
232 232 if util.statislink(st):
233 233 return 'l'
234 234 if util.statisexec(st):
235 235 return 'x'
236 236 except OSError:
237 237 pass
238 238 return ''
239 239 return f
240 240
241 241 fallback = buildfallback()
242 242 if self._checklink:
243 243 def f(x):
244 244 if os.path.islink(self._join(x)):
245 245 return 'l'
246 246 if 'x' in fallback(x):
247 247 return 'x'
248 248 return ''
249 249 return f
250 250 if self._checkexec:
251 251 def f(x):
252 252 if 'l' in fallback(x):
253 253 return 'l'
254 254 if util.isexec(self._join(x)):
255 255 return 'x'
256 256 return ''
257 257 return f
258 258 else:
259 259 return fallback
260 260
261 261 @propertycache
262 262 def _cwd(self):
263 263 # internal config: ui.forcecwd
264 264 forcecwd = self._ui.config('ui', 'forcecwd')
265 265 if forcecwd:
266 266 return forcecwd
267 267 return pycompat.getcwd()
268 268
269 269 def getcwd(self):
270 270 '''Return the path from which a canonical path is calculated.
271 271
272 272 This path should be used to resolve file patterns or to convert
273 273 canonical paths back to file paths for display. It shouldn't be
274 274 used to get real file paths. Use vfs functions instead.
275 275 '''
276 276 cwd = self._cwd
277 277 if cwd == self._root:
278 278 return ''
279 279 # self._root ends with a path separator if self._root is '/' or 'C:\'
280 280 rootsep = self._root
281 281 if not util.endswithsep(rootsep):
282 282 rootsep += pycompat.ossep
283 283 if cwd.startswith(rootsep):
284 284 return cwd[len(rootsep):]
285 285 else:
286 286 # we're outside the repo. return an absolute path.
287 287 return cwd
288 288
289 289 def pathto(self, f, cwd=None):
290 290 if cwd is None:
291 291 cwd = self.getcwd()
292 292 path = util.pathto(self._root, cwd, f)
293 293 if self._slash:
294 294 return util.pconvert(path)
295 295 return path
296 296
297 297 def __getitem__(self, key):
298 298 '''Return the current state of key (a filename) in the dirstate.
299 299
300 300 States are:
301 301 n normal
302 302 m needs merging
303 303 r marked for removal
304 304 a marked for addition
305 305 ? not tracked
306 306 '''
307 307 return self._map.get(key, ("?",))[0]
308 308
    def __contains__(self, key):
        # membership means the file is tracked in some state
        return key in self._map
311 311
    def __iter__(self):
        # iterate tracked filenames in sorted order
        return iter(sorted(self._map))
314 314
    def items(self):
        # (filename, dirstatetuple) pairs from the underlying map
        return self._map.iteritems()

    iteritems = items
319 319
    def parents(self):
        # validated (p1, p2) working directory parents
        return [self._validate(p) for p in self._pl]
322 322
    def p1(self):
        # validated first working directory parent
        return self._validate(self._pl[0])
325 325
    def p2(self):
        # validated second working directory parent (nullid outside merges)
        return self._validate(self._pl[1])
328 328
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
331 331
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        Must be called inside a parentchange() scope.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for change callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: only non-normal/other-parent entries can
            # carry merge-specific state that needs fixing up
            candidatefiles = self._nonnormalset.union(self._otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
371 371
    def setbranch(self, branch):
        """Persist *branch* (local encoding) to .hg/branch atomically."""
        self._branch = encoding.fromlocal(branch)
        # atomictemp + checkambig avoid torn writes and ambiguous mtimes
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
387 387
    def _read(self):
        """Load the dirstate file into a fresh dirstatemap (_map)."""
        # ignore HG_PENDING because identity is used only for writing
        self._identity = util.filestat.frompath(
            self._opener.join(self._filename))
        self._map = dirstatemap(self._ui, self._opener, self._root)
        self._map.read()
394 394
    def invalidate(self):
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

        # drop every cached/lazy attribute so the next access recomputes it
        for a in ("_map", "_identity",
                  "_filefoldmap", "_dirfoldmap", "_branch",
                  "_dirs", "_ignore", "_nonnormalset",
                  "_otherparentset"):
            if a in self.__dict__:
                delattr(self, a)
        # reset all in-memory bookkeeping to a pristine state
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
413 413
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # only dirty the entry if there actually was a copy record
            self._updatedfiles.add(dest)
425 425
426 426 def copied(self, file):
427 427 return self._map.copymap.get(file, None)
428 428
    def copies(self):
        # full dest -> source copy map (live reference, not a copy)
        return self._map.copymap
431 431
    def _droppath(self, f):
        """Update auxiliary caches when *f* leaves the tracked set."""
        # only files currently tracked ('n', 'm', 'a') contribute to _dirs
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]

        self._updatedfiles.add(f)
442 442
    def _addpath(self, f, state, mode, size, mtime):
        """Insert/overwrite entry *f* with the given fields, keeping the
        auxiliary sets (_dirs, _nonnormalset, _otherparentset) in sync.

        Raises error.Abort if *f* collides with a tracked directory or a
        tracked file shadows one of its path components.
        """
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        # mtime == -1 marks "needs lookup" even in state 'n'
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
        if size == -2:
            self._otherparentset.add(f)
466 466
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # mask fields to 31 bits so they fit the on-disk format
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
481 481
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == 'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                # note: 'and' binds tighter than 'or' here — this is
                # "'m'" or "('n' with size -2)"
                if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                    return
        # mtime -1 forces a content comparison on the next status()
        self._addpath(f, 'n', 0, -1, -1)
        self._map.copymap.pop(f, None)
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
505 505
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, 'n', 0, -2, -1)
        self._map.copymap.pop(f, None)
518 518
    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        # an added file cannot carry a stale copy record
        self._map.copymap.pop(f, None)
523 523
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state in the size field so that
                # normallookup() can restore it after the merge
                if entry[0] == 'm': # merge
                    size = -1
                elif entry[0] == 'n' and entry[2] == -2: # other parent
                    size = -2
                    self._otherparentset.add(f)
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._nonnormalset.add(f)
        if size == 0:
            # no merge state to preserve, so the copy record is obsolete
            self._map.copymap.pop(f, None)
542 542
543 543 def merge(self, f):
544 544 '''Mark a file merged.'''
545 545 if self._pl[1] == nullid:
546 546 return self.normallookup(f)
547 547 return self.otherparent(f)
548 548
    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]
            if f in self._nonnormalset:
                self._nonnormalset.remove(f)
            self._map.copymap.pop(f, None)
558 558
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the filesystem case of *path* and cache it in *storemap*.

        *normed* is the case-folded form of *path*; *exists* may be passed
        to skip the lexists() probe.  Missing paths fall back to
        normalizing their parent components (unless ignoremissing).
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
584 584
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of *path* against tracked files only."""
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # came straight from a disk walk; trust its case
                folded = path
            else:
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._filefoldmap)
        return folded
595 595
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of *path* against files, then directories."""
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            folded = self._dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._dirfoldmap)
        return folded
610 610
    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments

        On a case-sensitive filesystem this is the identity function.
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
632 632
    def clear(self):
        """Reset the dirstate to empty (parents become null) and mark it
        dirty so the empty state gets written out."""
        self._map = dirstatemap(self._ui, self._opener, self._root)
        self._nonnormalset = set()
        self._otherparentset = set()
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._map.setparents(nullid, nullid)
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
643 643
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate against manifest *allfiles* with *parent* as
        the sole parent; only *changedfiles* (default: everything) are
        re-examined."""
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # clear() resets _lastnormaltime; preserve it across the rebuild
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
662 662
    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        return self._identity
670 670
    def write(self, tr):
        """Write pending changes out, either immediately or deferred
        through transaction *tr* (a file generator).  No-op when clean."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f in self._updatedfiles:
                e = dmap.get(f)
                if e is not None and e[0] == 'n' and e[3] == now:
                    # mtime == now is ambiguous; force a future lookup
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
703 703
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
714 714
715 715 def _writedirstate(self, st):
716 716 # notify callbacks about parents change
717 717 if self._origpl is not None and self._origpl != self._pl:
718 718 for c, callback in sorted(self._plchangecallbacks.iteritems()):
719 719 callback(self, self._origpl, self._pl)
720 720 self._origpl = None
721 721 # use the modification time of the newly created temporary file as the
722 722 # filesystem's notion of 'now'
723 723 now = util.fstat(st).st_mtime & _rangemask
724 724
725 725 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
726 726 # timestamp of each entries in dirstate, because of 'now > mtime'
727 727 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
728 728 if delaywrite > 0:
729 729 # do we have any files to delay for?
730 730 for f, e in self._map.iteritems():
731 731 if e[0] == 'n' and e[3] == now:
732 732 import time # to avoid useless import
733 733 # rather than sleep n seconds, sleep until the next
734 734 # multiple of n seconds
735 735 clock = time.time()
736 736 start = int(clock) - (int(clock) % delaywrite)
737 737 end = start + delaywrite
738 738 time.sleep(end - clock)
739 739 now = end # trust our estimate that the end is near now
740 740 break
741 741
742 st.write(parsers.pack_dirstate(self._map._map, self._map.copymap,
743 self._pl, now))
742 self._map.write(st, now)
744 743 self._nonnormalset, self._otherparentset = self._map.nonnormalentries()
745 st.close()
746 744 self._lastnormaltime = 0
747 self._dirty = self._map._dirtyparents = False
745 self._dirty = False
748 746
749 747 def _dirignore(self, f):
750 748 if f == '.':
751 749 return False
752 750 if self._ignore(f):
753 751 return True
754 752 for p in util.finddirs(f):
755 753 if self._ignore(p):
756 754 return True
757 755 return False
758 756
759 757 def _ignorefiles(self):
760 758 files = []
761 759 if os.path.exists(self._join('.hgignore')):
762 760 files.append(self._join('.hgignore'))
763 761 for name, path in self._ui.configitems("ui"):
764 762 if name == 'ignore' or name.startswith('ignore.'):
765 763 # we need to use os.path.join here rather than self._join
766 764 # because path is arbitrary and user-specified
767 765 files.append(os.path.join(self._rootdir, util.expandpath(path)))
768 766 return files
769 767
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the rule that ignores *f*,
        following subinclude: directives breadth-first; (None, -1, "")
        when no rule matches."""
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    # queue the included file once; don't match on it here
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")
789 787
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description of an unwalkable file type
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # hoist frequently used attributes/functions into locals
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop any matched files that live inside a subrepo; both lists
        # are sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap._map)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
919 917
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        unknown/ignored control whether untracked and ignored files are
        visited (see the ignore/dirignore selection below); entries named
        in subrepos are removed from the result before returning.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates according to what the caller wants
        # reported: util.never reports everything, util.always skips
        # everything, and self._ignore/_dirignore apply hgignore rules.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind frequently used attributes and functions to locals; the
        # traversal loop below runs once per directory entry, so repeated
        # attribute lookups would add up.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        # On case-insensitive filesystems we must normalize names found on
        # disk before comparing them to the dirstate (and cannot skip step 3).
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal; 'work' is the stack of
            # directories still to visit.
            wadd = work.append
            while work:
                nd = work.pop()
                if not match.visitdir(nd):
                    continue
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    # never descend into the .hg directory itself
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        # report unreadable/vanished dirs via match.bad and
                        # keep walking the rest
                        match.bad(self.pathto(nd),
                                  encoding.strtolocal(inst.strerror))
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a tracked name that is now a directory is
                            # reported as missing (None)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked name exists on disk with some other
                            # kind (fifo, device, ...): report as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1084 1082
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        # Keep the caller's flags under new names; the original parameter
        # names are reused below for the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # Bind hot attributes and bound methods to locals: the loop below
        # runs once per file reported by walk().
        dmap = self._map
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # untracked file: classify as ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                # tracked (normal/modified/added) but gone from disk
                dadd(fn)
            elif state == 'n':
                # 'normal' entry: size or exec-bit mismatch means definitely
                # modified; matching size with differing mtime means "unsure"
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1176 1174
1177 1175 def matches(self, match):
1178 1176 '''
1179 1177 return files in the dirstate (in whatever state) filtered by match
1180 1178 '''
1181 1179 dmap = self._map
1182 1180 if match.always():
1183 1181 return dmap.keys()
1184 1182 files = match.files()
1185 1183 if match.isexact():
1186 1184 # fast path -- filter the other way around, since typically files is
1187 1185 # much smaller than dmap
1188 1186 return [f for f in files if f in dmap]
1189 1187 if match.prefix() and all(fn in dmap for fn in files):
1190 1188 # fast path -- all the values are known to be files, so just return
1191 1189 # that
1192 1190 return list(files)
1193 1191 return [f for f in dmap if match(f)]
1194 1192
1195 1193 def _actualfilename(self, tr):
1196 1194 if tr:
1197 1195 return self._pendingfilename
1198 1196 else:
1199 1197 return self._filename
1200 1198
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        tr is the running transaction, or None when there is no transaction;
        backupname is interpreted relative to self._opener and must differ
        from the live dirstate file name.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # remove any stale backup before copying over it
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1230 1228
1231 1229 def restorebackup(self, tr, backupname):
1232 1230 '''Restore dirstate by backup file'''
1233 1231 # this "invalidate()" prevents "wlock.release()" from writing
1234 1232 # changes of dirstate out after restoring from backup file
1235 1233 self.invalidate()
1236 1234 filename = self._actualfilename(tr)
1237 1235 self._opener.rename(backupname, filename, checkambig=True)
1238 1236
1239 1237 def clearbackup(self, tr, backupname):
1240 1238 '''Clear backup file'''
1241 1239 self._opener.unlink(backupname)
1242 1240
class dirstatemap(object):
    """In-memory representation of the on-disk 'dirstate' file.

    Holds the {filename: dirstatetuple} map parsed from disk, the copy
    map (copy destination -> source), and the two working directory
    parent nodes.
    """
    def __init__(self, ui, opener, root):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = 'dirstate'

        self._map = {}  # filename -> dirstatetuple(state, mode, size, mtime)
        self.copymap = {}  # copy destination -> copy source
        self._parents = None  # lazily loaded pair of nodes, see parents()
        self._dirtyparents = False  # set once setparents() has been called

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    # The methods below expose a dict-like interface, delegating straight
    # to the underlying filename map.

    def iteritems(self):
        return self._map.iteritems()

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __setitem__(self, key, value):
        self._map[key] = value

    def __getitem__(self, key):
        return self._map[key]

    def __delitem__(self, key):
        del self._map[key]

    def keys(self):
        return self._map.keys()

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            # fast path: the C parsers module computes both sets in one pass
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            # pure-Python fallback when the C helper is unavailable
            nonnorm = set()
            otherparent = set()
            for fname, e in self._map.iteritems():
                # e is (state, mode, size, mtime); mtime == -1 marks an
                # entry that needs to be looked up again
                if e[0] != 'n' or e[3] == -1:
                    nonnorm.add(fname)
                # size == -2 marks content coming from the other parent
                if e[0] == 'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # fast path via the C extension
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            # removed ('r') entries are excluded from the fold map
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    def dirs(self):
        """Returns a set-like object containing all the directories in the
        current dirstate.
        """
        # entries in state 'r' (removed) are skipped by util.dirs
        return util.dirs(self._map, 'r')

    def _opendirstatefile(self):
        # Open the dirstate, preferring a pending (in-transaction) file when
        # one exists. Remember which mode was seen so that a later open
        # cannot silently switch between pending and committed contents.
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp

    def parents(self):
        # Lazily read the two 20-byte parent nodes from the head of the
        # dirstate file; a missing file means an empty (null, null) state.
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(40)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = ''

            l = len(st)
            if l == 40:
                self._parents = st[:20], st[20:40]
            elif l == 0:
                self._parents = [nullid, nullid]
            else:
                # anything other than 0 or 40 bytes is corruption
                raise error.Abort(_('working directory state appears '
                                    'damaged!'))

        return self._parents

    def setparents(self, p1, p2):
        # Record new parents in memory; they hit disk on the next write().
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        # Parse the on-disk dirstate into self._map/self.copymap. A missing
        # or empty file leaves both maps empty.
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

    def write(self, st, now):
        # Pack the map, copy map and parents and write the result to the
        # already-open file object 'st', then close it. 'now' is the
        # timestamp passed through to the packer.
        st.write(parsers.pack_dirstate(self._map, self.copymap,
                                       self.parents(), now))
        st.close()
        self._dirtyparents = False
General Comments 0
You need to be logged in to leave comments. Login now