dirstate: drop deprecated methods (API)...
Matt Harbison
r35969:265e91da default
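The methods removed below, beginparentchange() and endparentchange() (deprecated since 4.3), are superseded by the parentchange() context manager that remains in the class. A minimal migration sketch in Python; `repo`, `p1`, and `p2` are assumed names standing for a localrepo instance whose wlock is already held and the desired parent node ids:

# old calling pattern, removed by this changeset:
#   repo.dirstate.beginparentchange()
#   repo.dirstate.setparents(p1, p2)
#   repo.dirstate.endparentchange()

# replacement: the context manager only lets the dirstate be written
# when the block exits normally, mirroring the old begin/end pair
with repo.dirstate.parentchange():
    repo.dirstate.setparents(p1, p2)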
@@ -1,1498 +1,1477 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 class dirstate(object):
58 58
59 59 def __init__(self, opener, ui, root, validate, sparsematchfn):
60 60 '''Create a new dirstate object.
61 61
62 62 opener is an open()-like callable that can be used to open the
63 63 dirstate file; root is the root of the directory tracked by
64 64 the dirstate.
65 65 '''
66 66 self._opener = opener
67 67 self._validate = validate
68 68 self._root = root
69 69 self._sparsematchfn = sparsematchfn
70 70 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
71 71 # UNC path pointing to root share (issue4557)
72 72 self._rootdir = pathutil.normasprefix(root)
73 73 self._dirty = False
74 74 self._lastnormaltime = 0
75 75 self._ui = ui
76 76 self._filecache = {}
77 77 self._parentwriters = 0
78 78 self._filename = 'dirstate'
79 79 self._pendingfilename = '%s.pending' % self._filename
80 80 self._plchangecallbacks = {}
81 81 self._origpl = None
82 82 self._updatedfiles = set()
83 83 self._mapcls = dirstatemap
84 84
85 85 @contextlib.contextmanager
86 86 def parentchange(self):
87 87 '''Context manager for handling dirstate parents.
88 88
89 89 If an exception occurs in the scope of the context manager,
90 90 the incoherent dirstate won't be written when wlock is
91 91 released.
92 92 '''
93 93 self._parentwriters += 1
94 94 yield
95 95 # Typically we want the "undo" step of a context manager in a
96 96 # finally block so it happens even when an exception
97 97 # occurs. In this case, however, we only want to decrement
98 98 # parentwriters if the code in the with statement exits
99 99 # normally, so we don't have a try/finally here on purpose.
100 100 self._parentwriters -= 1
101 101
102 def beginparentchange(self):
103 '''Marks the beginning of a set of changes that involve changing
104 the dirstate parents. If there is an exception during this time,
105 the dirstate will not be written when the wlock is released. This
106 prevents writing an incoherent dirstate where the parent doesn't
107 match the contents.
108 '''
109 self._ui.deprecwarn('beginparentchange is obsoleted by the '
110 'parentchange context manager.', '4.3')
111 self._parentwriters += 1
112
113 def endparentchange(self):
114 '''Marks the end of a set of changes that involve changing the
115 dirstate parents. Once all parent changes have been marked done,
116 the wlock will be free to write the dirstate on release.
117 '''
118 self._ui.deprecwarn('endparentchange is obsoleted by the '
119 'parentchange context manager.', '4.3')
120 if self._parentwriters > 0:
121 self._parentwriters -= 1
122
123 102 def pendingparentchange(self):
124 103 '''Returns true if the dirstate is in the middle of a set of changes
125 104 that modify the dirstate parent.
126 105 '''
127 106 return self._parentwriters > 0
128 107
129 108 @propertycache
130 109 def _map(self):
131 110 """Return the dirstate contents (see documentation for dirstatemap)."""
132 111 self._map = self._mapcls(self._ui, self._opener, self._root)
133 112 return self._map
134 113
135 114 @property
136 115 def _sparsematcher(self):
137 116 """The matcher for the sparse checkout.
138 117
139 118 The working directory may not include every file from a manifest. The
140 119 matcher obtained by this property will match a path if it is to be
141 120 included in the working directory.
142 121 """
143 122 # TODO there is potential to cache this property. For now, the matcher
144 123 # is resolved on every access. (But the called function does use a
145 124 # cache to keep the lookup fast.)
146 125 return self._sparsematchfn()
147 126
148 127 @repocache('branch')
149 128 def _branch(self):
150 129 try:
151 130 return self._opener.read("branch").strip() or "default"
152 131 except IOError as inst:
153 132 if inst.errno != errno.ENOENT:
154 133 raise
155 134 return "default"
156 135
157 136 @property
158 137 def _pl(self):
159 138 return self._map.parents()
160 139
161 140 def hasdir(self, d):
162 141 return self._map.hastrackeddir(d)
163 142
164 143 @rootcache('.hgignore')
165 144 def _ignore(self):
166 145 files = self._ignorefiles()
167 146 if not files:
168 147 return matchmod.never(self._root, '')
169 148
170 149 pats = ['include:%s' % f for f in files]
171 150 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
172 151
173 152 @propertycache
174 153 def _slash(self):
175 154 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
176 155
177 156 @propertycache
178 157 def _checklink(self):
179 158 return util.checklink(self._root)
180 159
181 160 @propertycache
182 161 def _checkexec(self):
183 162 return util.checkexec(self._root)
184 163
185 164 @propertycache
186 165 def _checkcase(self):
187 166 return not util.fscasesensitive(self._join('.hg'))
188 167
189 168 def _join(self, f):
190 169 # much faster than os.path.join()
191 170 # it's safe because f is always a relative path
192 171 return self._rootdir + f
193 172
194 173 def flagfunc(self, buildfallback):
195 174 if self._checklink and self._checkexec:
196 175 def f(x):
197 176 try:
198 177 st = os.lstat(self._join(x))
199 178 if util.statislink(st):
200 179 return 'l'
201 180 if util.statisexec(st):
202 181 return 'x'
203 182 except OSError:
204 183 pass
205 184 return ''
206 185 return f
207 186
208 187 fallback = buildfallback()
209 188 if self._checklink:
210 189 def f(x):
211 190 if os.path.islink(self._join(x)):
212 191 return 'l'
213 192 if 'x' in fallback(x):
214 193 return 'x'
215 194 return ''
216 195 return f
217 196 if self._checkexec:
218 197 def f(x):
219 198 if 'l' in fallback(x):
220 199 return 'l'
221 200 if util.isexec(self._join(x)):
222 201 return 'x'
223 202 return ''
224 203 return f
225 204 else:
226 205 return fallback
227 206
228 207 @propertycache
229 208 def _cwd(self):
230 209 # internal config: ui.forcecwd
231 210 forcecwd = self._ui.config('ui', 'forcecwd')
232 211 if forcecwd:
233 212 return forcecwd
234 213 return pycompat.getcwd()
235 214
236 215 def getcwd(self):
237 216 '''Return the path from which a canonical path is calculated.
238 217
239 218 This path should be used to resolve file patterns or to convert
240 219 canonical paths back to file paths for display. It shouldn't be
241 220 used to get real file paths. Use vfs functions instead.
242 221 '''
243 222 cwd = self._cwd
244 223 if cwd == self._root:
245 224 return ''
246 225 # self._root ends with a path separator if self._root is '/' or 'C:\'
247 226 rootsep = self._root
248 227 if not util.endswithsep(rootsep):
249 228 rootsep += pycompat.ossep
250 229 if cwd.startswith(rootsep):
251 230 return cwd[len(rootsep):]
252 231 else:
253 232 # we're outside the repo. return an absolute path.
254 233 return cwd
255 234
256 235 def pathto(self, f, cwd=None):
257 236 if cwd is None:
258 237 cwd = self.getcwd()
259 238 path = util.pathto(self._root, cwd, f)
260 239 if self._slash:
261 240 return util.pconvert(path)
262 241 return path
263 242
264 243 def __getitem__(self, key):
265 244 '''Return the current state of key (a filename) in the dirstate.
266 245
267 246 States are:
268 247 n normal
269 248 m needs merging
270 249 r marked for removal
271 250 a marked for addition
272 251 ? not tracked
273 252 '''
274 253 return self._map.get(key, ("?",))[0]
275 254
276 255 def __contains__(self, key):
277 256 return key in self._map
278 257
279 258 def __iter__(self):
280 259 return iter(sorted(self._map))
281 260
282 261 def items(self):
283 262 return self._map.iteritems()
284 263
285 264 iteritems = items
286 265
287 266 def parents(self):
288 267 return [self._validate(p) for p in self._pl]
289 268
290 269 def p1(self):
291 270 return self._validate(self._pl[0])
292 271
293 272 def p2(self):
294 273 return self._validate(self._pl[1])
295 274
296 275 def branch(self):
297 276 return encoding.tolocal(self._branch)
298 277
299 278 def setparents(self, p1, p2=nullid):
300 279 """Set dirstate parents to p1 and p2.
301 280
302 281 When moving from two parents to one, 'm' merged entries a
303 282 adjusted to normal and previous copy records discarded and
304 283 returned by the call.
305 284
306 285 See localrepo.setparents()
307 286 """
308 287 if self._parentwriters == 0:
309 288 raise ValueError("cannot set dirstate parent without "
310 289 "calling dirstate.beginparentchange")
311 290
312 291 self._dirty = True
313 292 oldp2 = self._pl[1]
314 293 if self._origpl is None:
315 294 self._origpl = self._pl
316 295 self._map.setparents(p1, p2)
317 296 copies = {}
318 297 if oldp2 != nullid and p2 == nullid:
319 298 candidatefiles = self._map.nonnormalset.union(
320 299 self._map.otherparentset)
321 300 for f in candidatefiles:
322 301 s = self._map.get(f)
323 302 if s is None:
324 303 continue
325 304
326 305 # Discard 'm' markers when moving away from a merge state
327 306 if s[0] == 'm':
328 307 source = self._map.copymap.get(f)
329 308 if source:
330 309 copies[f] = source
331 310 self.normallookup(f)
332 311 # Also fix up otherparent markers
333 312 elif s[0] == 'n' and s[2] == -2:
334 313 source = self._map.copymap.get(f)
335 314 if source:
336 315 copies[f] = source
337 316 self.add(f)
338 317 return copies
339 318
340 319 def setbranch(self, branch):
341 320 self._branch = encoding.fromlocal(branch)
342 321 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
343 322 try:
344 323 f.write(self._branch + '\n')
345 324 f.close()
346 325
347 326 # make sure filecache has the correct stat info for _branch after
348 327 # replacing the underlying file
349 328 ce = self._filecache['_branch']
350 329 if ce:
351 330 ce.refresh()
352 331 except: # re-raises
353 332 f.discard()
354 333 raise
355 334
356 335 def invalidate(self):
357 336 '''Causes the next access to reread the dirstate.
358 337
359 338 This is different from localrepo.invalidatedirstate() because it always
360 339 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
361 340 check whether the dirstate has changed before rereading it.'''
362 341
363 342 for a in (r"_map", r"_branch", r"_ignore"):
364 343 if a in self.__dict__:
365 344 delattr(self, a)
366 345 self._lastnormaltime = 0
367 346 self._dirty = False
368 347 self._updatedfiles.clear()
369 348 self._parentwriters = 0
370 349 self._origpl = None
371 350
372 351 def copy(self, source, dest):
373 352 """Mark dest as a copy of source. Unmark dest if source is None."""
374 353 if source == dest:
375 354 return
376 355 self._dirty = True
377 356 if source is not None:
378 357 self._map.copymap[dest] = source
379 358 self._updatedfiles.add(source)
380 359 self._updatedfiles.add(dest)
381 360 elif self._map.copymap.pop(dest, None):
382 361 self._updatedfiles.add(dest)
383 362
384 363 def copied(self, file):
385 364 return self._map.copymap.get(file, None)
386 365
387 366 def copies(self):
388 367 return self._map.copymap
389 368
390 369 def _addpath(self, f, state, mode, size, mtime):
391 370 oldstate = self[f]
392 371 if state == 'a' or oldstate == 'r':
393 372 scmutil.checkfilename(f)
394 373 if self._map.hastrackeddir(f):
395 374 raise error.Abort(_('directory %r already in dirstate') % f)
396 375 # shadows
397 376 for d in util.finddirs(f):
398 377 if self._map.hastrackeddir(d):
399 378 break
400 379 entry = self._map.get(d)
401 380 if entry is not None and entry[0] != 'r':
402 381 raise error.Abort(
403 382 _('file %r in dirstate clashes with %r') % (d, f))
404 383 self._dirty = True
405 384 self._updatedfiles.add(f)
406 385 self._map.addfile(f, oldstate, state, mode, size, mtime)
407 386
408 387 def normal(self, f):
409 388 '''Mark a file normal and clean.'''
410 389 s = os.lstat(self._join(f))
411 390 mtime = s.st_mtime
412 391 self._addpath(f, 'n', s.st_mode,
413 392 s.st_size & _rangemask, mtime & _rangemask)
414 393 self._map.copymap.pop(f, None)
415 394 if f in self._map.nonnormalset:
416 395 self._map.nonnormalset.remove(f)
417 396 if mtime > self._lastnormaltime:
418 397 # Remember the most recent modification timeslot for status(),
419 398 # to make sure we won't miss future size-preserving file content
420 399 # modifications that happen within the same timeslot.
421 400 self._lastnormaltime = mtime
422 401
423 402 def normallookup(self, f):
424 403 '''Mark a file normal, but possibly dirty.'''
425 404 if self._pl[1] != nullid:
426 405 # if there is a merge going on and the file was either
427 406 # in state 'm' (-1) or coming from other parent (-2) before
428 407 # being removed, restore that state.
429 408 entry = self._map.get(f)
430 409 if entry is not None:
431 410 if entry[0] == 'r' and entry[2] in (-1, -2):
432 411 source = self._map.copymap.get(f)
433 412 if entry[2] == -1:
434 413 self.merge(f)
435 414 elif entry[2] == -2:
436 415 self.otherparent(f)
437 416 if source:
438 417 self.copy(source, f)
439 418 return
440 419 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
441 420 return
442 421 self._addpath(f, 'n', 0, -1, -1)
443 422 self._map.copymap.pop(f, None)
444 423
445 424 def otherparent(self, f):
446 425 '''Mark as coming from the other parent, always dirty.'''
447 426 if self._pl[1] == nullid:
448 427 raise error.Abort(_("setting %r to other parent "
449 428 "only allowed in merges") % f)
450 429 if f in self and self[f] == 'n':
451 430 # merge-like
452 431 self._addpath(f, 'm', 0, -2, -1)
453 432 else:
454 433 # add-like
455 434 self._addpath(f, 'n', 0, -2, -1)
456 435 self._map.copymap.pop(f, None)
457 436
458 437 def add(self, f):
459 438 '''Mark a file added.'''
460 439 self._addpath(f, 'a', 0, -1, -1)
461 440 self._map.copymap.pop(f, None)
462 441
463 442 def remove(self, f):
464 443 '''Mark a file removed.'''
465 444 self._dirty = True
466 445 oldstate = self[f]
467 446 size = 0
468 447 if self._pl[1] != nullid:
469 448 entry = self._map.get(f)
470 449 if entry is not None:
471 450 # backup the previous state
472 451 if entry[0] == 'm': # merge
473 452 size = -1
474 453 elif entry[0] == 'n' and entry[2] == -2: # other parent
475 454 size = -2
476 455 self._map.otherparentset.add(f)
477 456 self._updatedfiles.add(f)
478 457 self._map.removefile(f, oldstate, size)
479 458 if size == 0:
480 459 self._map.copymap.pop(f, None)
481 460
482 461 def merge(self, f):
483 462 '''Mark a file merged.'''
484 463 if self._pl[1] == nullid:
485 464 return self.normallookup(f)
486 465 return self.otherparent(f)
487 466
488 467 def drop(self, f):
489 468 '''Drop a file from the dirstate'''
490 469 oldstate = self[f]
491 470 if self._map.dropfile(f, oldstate):
492 471 self._dirty = True
493 472 self._updatedfiles.add(f)
494 473 self._map.copymap.pop(f, None)
495 474
496 475 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
497 476 if exists is None:
498 477 exists = os.path.lexists(os.path.join(self._root, path))
499 478 if not exists:
500 479 # Maybe a path component exists
501 480 if not ignoremissing and '/' in path:
502 481 d, f = path.rsplit('/', 1)
503 482 d = self._normalize(d, False, ignoremissing, None)
504 483 folded = d + "/" + f
505 484 else:
506 485 # No path components, preserve original case
507 486 folded = path
508 487 else:
509 488 # recursively normalize leading directory components
510 489 # against dirstate
511 490 if '/' in normed:
512 491 d, f = normed.rsplit('/', 1)
513 492 d = self._normalize(d, False, ignoremissing, True)
514 493 r = self._root + "/" + d
515 494 folded = d + "/" + util.fspath(f, r)
516 495 else:
517 496 folded = util.fspath(normed, self._root)
518 497 storemap[normed] = folded
519 498
520 499 return folded
521 500
522 501 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
523 502 normed = util.normcase(path)
524 503 folded = self._map.filefoldmap.get(normed, None)
525 504 if folded is None:
526 505 if isknown:
527 506 folded = path
528 507 else:
529 508 folded = self._discoverpath(path, normed, ignoremissing, exists,
530 509 self._map.filefoldmap)
531 510 return folded
532 511
533 512 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
534 513 normed = util.normcase(path)
535 514 folded = self._map.filefoldmap.get(normed, None)
536 515 if folded is None:
537 516 folded = self._map.dirfoldmap.get(normed, None)
538 517 if folded is None:
539 518 if isknown:
540 519 folded = path
541 520 else:
542 521 # store discovered result in dirfoldmap so that future
543 522 # normalizefile calls don't start matching directories
544 523 folded = self._discoverpath(path, normed, ignoremissing, exists,
545 524 self._map.dirfoldmap)
546 525 return folded
547 526
548 527 def normalize(self, path, isknown=False, ignoremissing=False):
549 528 '''
550 529 normalize the case of a pathname when on a casefolding filesystem
551 530
552 531 isknown specifies whether the filename came from walking the
553 532 disk, to avoid extra filesystem access.
554 533
555 534 If ignoremissing is True, missing paths are returned
556 535 unchanged. Otherwise, we try harder to normalize possibly
557 536 existing path components.
558 537
559 538 The normalized case is determined based on the following precedence:
560 539
561 540 - version of name already stored in the dirstate
562 541 - version of name stored on disk
563 542 - version provided via command arguments
564 543 '''
565 544
566 545 if self._checkcase:
567 546 return self._normalize(path, isknown, ignoremissing)
568 547 return path
569 548
570 549 def clear(self):
571 550 self._map.clear()
572 551 self._lastnormaltime = 0
573 552 self._updatedfiles.clear()
574 553 self._dirty = True
575 554
576 555 def rebuild(self, parent, allfiles, changedfiles=None):
577 556 if changedfiles is None:
578 557 # Rebuild entire dirstate
579 558 changedfiles = allfiles
580 559 lastnormaltime = self._lastnormaltime
581 560 self.clear()
582 561 self._lastnormaltime = lastnormaltime
583 562
584 563 if self._origpl is None:
585 564 self._origpl = self._pl
586 565 self._map.setparents(parent, nullid)
587 566 for f in changedfiles:
588 567 if f in allfiles:
589 568 self.normallookup(f)
590 569 else:
591 570 self.drop(f)
592 571
593 572 self._dirty = True
594 573
595 574 def identity(self):
596 575 '''Return the identity of the dirstate itself, to detect changes in storage
597 576
598 577 If the identity of the previous dirstate is equal to this one, changes
599 578 written out based on the former dirstate can preserve consistency.
600 579 '''
601 580 return self._map.identity
602 581
603 582 def write(self, tr):
604 583 if not self._dirty:
605 584 return
606 585
607 586 filename = self._filename
608 587 if tr:
609 588 # 'dirstate.write()' is not only for writing in-memory
610 589 # changes out, but also for dropping ambiguous timestamps.
611 590 # Delayed writing would re-raise the "ambiguous timestamp issue".
612 591 # See also the wiki page below for detail:
613 592 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
614 593
615 594 # emulate dropping timestamp in 'parsers.pack_dirstate'
616 595 now = _getfsnow(self._opener)
617 596 self._map.clearambiguoustimes(self._updatedfiles, now)
618 597
619 598 # emulate that all 'dirstate.normal' results are written out
620 599 self._lastnormaltime = 0
621 600 self._updatedfiles.clear()
622 601
623 602 # delay writing in-memory changes out
624 603 tr.addfilegenerator('dirstate', (self._filename,),
625 604 self._writedirstate, location='plain')
626 605 return
627 606
628 607 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
629 608 self._writedirstate(st)
630 609
631 610 def addparentchangecallback(self, category, callback):
632 611 """add a callback to be called when the wd parents are changed
633 612
634 613 Callback will be called with the following arguments:
635 614 dirstate, (oldp1, oldp2), (newp1, newp2)
636 615
637 616 Category is a unique identifier to allow overwriting an old callback
638 617 with a newer callback.
639 618 """
640 619 self._plchangecallbacks[category] = callback
641 620
642 621 def _writedirstate(self, st):
643 622 # notify callbacks about parents change
644 623 if self._origpl is not None and self._origpl != self._pl:
645 624 for c, callback in sorted(self._plchangecallbacks.iteritems()):
646 625 callback(self, self._origpl, self._pl)
647 626 self._origpl = None
648 627 # use the modification time of the newly created temporary file as the
649 628 # filesystem's notion of 'now'
650 629 now = util.fstat(st).st_mtime & _rangemask
651 630
652 631 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
653 632 # the timestamp of each entry in the dirstate, because of 'now > mtime'
654 633 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
655 634 if delaywrite > 0:
656 635 # do we have any files to delay for?
657 636 for f, e in self._map.iteritems():
658 637 if e[0] == 'n' and e[3] == now:
659 638 import time # to avoid useless import
660 639 # rather than sleep n seconds, sleep until the next
661 640 # multiple of n seconds
662 641 clock = time.time()
663 642 start = int(clock) - (int(clock) % delaywrite)
664 643 end = start + delaywrite
665 644 time.sleep(end - clock)
666 645 now = end # trust our estimate that the end is near now
667 646 break
668 647
669 648 self._map.write(st, now)
670 649 self._lastnormaltime = 0
671 650 self._dirty = False
672 651
673 652 def _dirignore(self, f):
674 653 if f == '.':
675 654 return False
676 655 if self._ignore(f):
677 656 return True
678 657 for p in util.finddirs(f):
679 658 if self._ignore(p):
680 659 return True
681 660 return False
682 661
683 662 def _ignorefiles(self):
684 663 files = []
685 664 if os.path.exists(self._join('.hgignore')):
686 665 files.append(self._join('.hgignore'))
687 666 for name, path in self._ui.configitems("ui"):
688 667 if name == 'ignore' or name.startswith('ignore.'):
689 668 # we need to use os.path.join here rather than self._join
690 669 # because path is arbitrary and user-specified
691 670 files.append(os.path.join(self._rootdir, util.expandpath(path)))
692 671 return files
693 672
694 673 def _ignorefileandline(self, f):
695 674 files = collections.deque(self._ignorefiles())
696 675 visited = set()
697 676 while files:
698 677 i = files.popleft()
699 678 patterns = matchmod.readpatternfile(i, self._ui.warn,
700 679 sourceinfo=True)
701 680 for pattern, lineno, line in patterns:
702 681 kind, p = matchmod._patsplit(pattern, 'glob')
703 682 if kind == "subinclude":
704 683 if p not in visited:
705 684 files.append(p)
706 685 continue
707 686 m = matchmod.match(self._root, '', [], [pattern],
708 687 warn=self._ui.warn)
709 688 if m(f):
710 689 return (i, lineno, line)
711 690 visited.add(i)
712 691 return (None, -1, "")
713 692
714 693 def _walkexplicit(self, match, subrepos):
715 694 '''Get stat data about the files explicitly specified by match.
716 695
717 696 Return a triple (results, dirsfound, dirsnotfound).
718 697 - results is a mapping from filename to stat result. It also contains
719 698 listings mapping subrepos and .hg to None.
720 699 - dirsfound is a list of files found to be directories.
721 700 - dirsnotfound is a list of files that the dirstate thinks are
722 701 directories and that were not found.'''
723 702
724 703 def badtype(mode):
725 704 kind = _('unknown')
726 705 if stat.S_ISCHR(mode):
727 706 kind = _('character device')
728 707 elif stat.S_ISBLK(mode):
729 708 kind = _('block device')
730 709 elif stat.S_ISFIFO(mode):
731 710 kind = _('fifo')
732 711 elif stat.S_ISSOCK(mode):
733 712 kind = _('socket')
734 713 elif stat.S_ISDIR(mode):
735 714 kind = _('directory')
736 715 return _('unsupported file type (type is %s)') % kind
737 716
738 717 matchedir = match.explicitdir
739 718 badfn = match.bad
740 719 dmap = self._map
741 720 lstat = os.lstat
742 721 getkind = stat.S_IFMT
743 722 dirkind = stat.S_IFDIR
744 723 regkind = stat.S_IFREG
745 724 lnkkind = stat.S_IFLNK
746 725 join = self._join
747 726 dirsfound = []
748 727 foundadd = dirsfound.append
749 728 dirsnotfound = []
750 729 notfoundadd = dirsnotfound.append
751 730
752 731 if not match.isexact() and self._checkcase:
753 732 normalize = self._normalize
754 733 else:
755 734 normalize = None
756 735
757 736 files = sorted(match.files())
758 737 subrepos.sort()
759 738 i, j = 0, 0
760 739 while i < len(files) and j < len(subrepos):
761 740 subpath = subrepos[j] + "/"
762 741 if files[i] < subpath:
763 742 i += 1
764 743 continue
765 744 while i < len(files) and files[i].startswith(subpath):
766 745 del files[i]
767 746 j += 1
768 747
769 748 if not files or '.' in files:
770 749 files = ['.']
771 750 results = dict.fromkeys(subrepos)
772 751 results['.hg'] = None
773 752
774 753 for ff in files:
775 754 # constructing the foldmap is expensive, so don't do it for the
776 755 # common case where files is ['.']
777 756 if normalize and ff != '.':
778 757 nf = normalize(ff, False, True)
779 758 else:
780 759 nf = ff
781 760 if nf in results:
782 761 continue
783 762
784 763 try:
785 764 st = lstat(join(nf))
786 765 kind = getkind(st.st_mode)
787 766 if kind == dirkind:
788 767 if nf in dmap:
789 768 # file replaced by dir on disk but still in dirstate
790 769 results[nf] = None
791 770 if matchedir:
792 771 matchedir(nf)
793 772 foundadd((nf, ff))
794 773 elif kind == regkind or kind == lnkkind:
795 774 results[nf] = st
796 775 else:
797 776 badfn(ff, badtype(kind))
798 777 if nf in dmap:
799 778 results[nf] = None
800 779 except OSError as inst: # nf not found on disk - it is dirstate only
801 780 if nf in dmap: # does it exactly match a missing file?
802 781 results[nf] = None
803 782 else: # does it match a missing directory?
804 783 if self._map.hasdir(nf):
805 784 if matchedir:
806 785 matchedir(nf)
807 786 notfoundadd(nf)
808 787 else:
809 788 badfn(ff, encoding.strtolocal(inst.strerror))
810 789
811 790 # Case insensitive filesystems cannot rely on lstat() failing to detect
812 791 # a case-only rename. Prune the stat object for any file that does not
813 792 # match the case in the filesystem, if there are multiple files that
814 793 # normalize to the same path.
815 794 if match.isexact() and self._checkcase:
816 795 normed = {}
817 796
818 797 for f, st in results.iteritems():
819 798 if st is None:
820 799 continue
821 800
822 801 nc = util.normcase(f)
823 802 paths = normed.get(nc)
824 803
825 804 if paths is None:
826 805 paths = set()
827 806 normed[nc] = paths
828 807
829 808 paths.add(f)
830 809
831 810 for norm, paths in normed.iteritems():
832 811 if len(paths) > 1:
833 812 for path in paths:
834 813 folded = self._discoverpath(path, norm, True, None,
835 814 self._map.dirfoldmap)
836 815 if path != folded:
837 816 results[path] = None
838 817
839 818 return results, dirsfound, dirsnotfound
840 819
841 820 def walk(self, match, subrepos, unknown, ignored, full=True):
842 821 '''
843 822 Walk recursively through the directory tree, finding all files
844 823 matched by match.
845 824
846 825 If full is False, maybe skip some known-clean files.
847 826
848 827 Return a dict mapping filename to stat-like object (either
849 828 mercurial.osutil.stat instance or return value of os.stat()).
850 829
851 830 '''
852 831 # full is a flag that extensions that hook into walk can use -- this
853 832 # implementation doesn't use it at all. This satisfies the contract
854 833 # because we only guarantee a "maybe".
855 834
856 835 if ignored:
857 836 ignore = util.never
858 837 dirignore = util.never
859 838 elif unknown:
860 839 ignore = self._ignore
861 840 dirignore = self._dirignore
862 841 else:
863 842 # if not unknown and not ignored, drop dir recursion and step 2
864 843 ignore = util.always
865 844 dirignore = util.always
866 845
867 846 matchfn = match.matchfn
868 847 matchalways = match.always()
869 848 matchtdir = match.traversedir
870 849 dmap = self._map
871 850 listdir = util.listdir
872 851 lstat = os.lstat
873 852 dirkind = stat.S_IFDIR
874 853 regkind = stat.S_IFREG
875 854 lnkkind = stat.S_IFLNK
876 855 join = self._join
877 856
878 857 exact = skipstep3 = False
879 858 if match.isexact(): # match.exact
880 859 exact = True
881 860 dirignore = util.always # skip step 2
882 861 elif match.prefix(): # match.match, no patterns
883 862 skipstep3 = True
884 863
885 864 if not exact and self._checkcase:
886 865 normalize = self._normalize
887 866 normalizefile = self._normalizefile
888 867 skipstep3 = False
889 868 else:
890 869 normalize = self._normalize
891 870 normalizefile = None
892 871
893 872 # step 1: find all explicit files
894 873 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
895 874
896 875 skipstep3 = skipstep3 and not (work or dirsnotfound)
897 876 work = [d for d in work if not dirignore(d[0])]
898 877
899 878 # step 2: visit subdirectories
900 879 def traverse(work, alreadynormed):
901 880 wadd = work.append
902 881 while work:
903 882 nd = work.pop()
904 883 if not match.visitdir(nd):
905 884 continue
906 885 skip = None
907 886 if nd == '.':
908 887 nd = ''
909 888 else:
910 889 skip = '.hg'
911 890 try:
912 891 entries = listdir(join(nd), stat=True, skip=skip)
913 892 except OSError as inst:
914 893 if inst.errno in (errno.EACCES, errno.ENOENT):
915 894 match.bad(self.pathto(nd),
916 895 encoding.strtolocal(inst.strerror))
917 896 continue
918 897 raise
919 898 for f, kind, st in entries:
920 899 if normalizefile:
921 900 # even though f might be a directory, we're only
922 901 # interested in comparing it to files currently in the
923 902 # dmap -- therefore normalizefile is enough
924 903 nf = normalizefile(nd and (nd + "/" + f) or f, True,
925 904 True)
926 905 else:
927 906 nf = nd and (nd + "/" + f) or f
928 907 if nf not in results:
929 908 if kind == dirkind:
930 909 if not ignore(nf):
931 910 if matchtdir:
932 911 matchtdir(nf)
933 912 wadd(nf)
934 913 if nf in dmap and (matchalways or matchfn(nf)):
935 914 results[nf] = None
936 915 elif kind == regkind or kind == lnkkind:
937 916 if nf in dmap:
938 917 if matchalways or matchfn(nf):
939 918 results[nf] = st
940 919 elif ((matchalways or matchfn(nf))
941 920 and not ignore(nf)):
942 921 # unknown file -- normalize if necessary
943 922 if not alreadynormed:
944 923 nf = normalize(nf, False, True)
945 924 results[nf] = st
946 925 elif nf in dmap and (matchalways or matchfn(nf)):
947 926 results[nf] = None
948 927
949 928 for nd, d in work:
950 929 # alreadynormed means that traverse() doesn't have to do any
951 930 # expensive directory normalization
952 931 alreadynormed = not normalize or nd == d
953 932 traverse([d], alreadynormed)
954 933
955 934 for s in subrepos:
956 935 del results[s]
957 936 del results['.hg']
958 937
959 938 # step 3: visit remaining files from dmap
960 939 if not skipstep3 and not exact:
961 940 # If a dmap file is not in results yet, it was either
962 941 # a) not matching matchfn b) ignored, c) missing, or d) under a
963 942 # symlink directory.
964 943 if not results and matchalways:
965 944 visit = [f for f in dmap]
966 945 else:
967 946 visit = [f for f in dmap if f not in results and matchfn(f)]
968 947 visit.sort()
969 948
970 949 if unknown:
971 950 # unknown == True means we walked all dirs under the roots
972 951 # that weren't ignored, and everything that matched was stat'ed
973 952 # and is already in results.
974 953 # The rest must thus be ignored or under a symlink.
975 954 audit_path = pathutil.pathauditor(self._root, cached=True)
976 955
977 956 for nf in iter(visit):
978 957 # If a stat for the same file was already added with a
979 958 # different case, don't add one for this, since that would
980 959 # make it appear as if the file exists under both names
981 960 # on disk.
982 961 if (normalizefile and
983 962 normalizefile(nf, True, True) in results):
984 963 results[nf] = None
985 964 # Report ignored items in the dmap as long as they are not
986 965 # under a symlink directory.
987 966 elif audit_path.check(nf):
988 967 try:
989 968 results[nf] = lstat(join(nf))
990 969 # file was just ignored, no links, and exists
991 970 except OSError:
992 971 # file doesn't exist
993 972 results[nf] = None
994 973 else:
995 974 # It's either missing or under a symlink directory
996 975 # which we in this case report as missing
997 976 results[nf] = None
998 977 else:
999 978 # We may not have walked the full directory tree above,
1000 979 # so stat and check everything we missed.
1001 980 iv = iter(visit)
1002 981 for st in util.statfiles([join(i) for i in visit]):
1003 982 results[next(iv)] = st
1004 983 return results
1005 984
1006 985 def status(self, match, subrepos, ignored, clean, unknown):
1007 986 '''Determine the status of the working copy relative to the
1008 987 dirstate and return a pair of (unsure, status), where status is of type
1009 988 scmutil.status and:
1010 989
1011 990 unsure:
1012 991 files that might have been modified since the dirstate was
1013 992 written, but need to be read to be sure (size is the same
1014 993 but mtime differs)
1015 994 status.modified:
1016 995 files that have definitely been modified since the dirstate
1017 996 was written (different size or mode)
1018 997 status.clean:
1019 998 files that have definitely not been modified since the
1020 999 dirstate was written
1021 1000 '''
1022 1001 listignored, listclean, listunknown = ignored, clean, unknown
1023 1002 lookup, modified, added, unknown, ignored = [], [], [], [], []
1024 1003 removed, deleted, clean = [], [], []
1025 1004
1026 1005 dmap = self._map
1027 1006 dmap.preload()
1028 1007 dcontains = dmap.__contains__
1029 1008 dget = dmap.__getitem__
1030 1009 ladd = lookup.append # aka "unsure"
1031 1010 madd = modified.append
1032 1011 aadd = added.append
1033 1012 uadd = unknown.append
1034 1013 iadd = ignored.append
1035 1014 radd = removed.append
1036 1015 dadd = deleted.append
1037 1016 cadd = clean.append
1038 1017 mexact = match.exact
1039 1018 dirignore = self._dirignore
1040 1019 checkexec = self._checkexec
1041 1020 copymap = self._map.copymap
1042 1021 lastnormaltime = self._lastnormaltime
1043 1022
1044 1023 # We need to do full walks when either
1045 1024 # - we're listing all clean files, or
1046 1025 # - match.traversedir does something, because match.traversedir should
1047 1026 # be called for every dir in the working dir
1048 1027 full = listclean or match.traversedir is not None
1049 1028 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1050 1029 full=full).iteritems():
1051 1030 if not dcontains(fn):
1052 1031 if (listignored or mexact(fn)) and dirignore(fn):
1053 1032 if listignored:
1054 1033 iadd(fn)
1055 1034 else:
1056 1035 uadd(fn)
1057 1036 continue
1058 1037
1059 1038 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1060 1039 # written like that for performance reasons. dmap[fn] is not a
1061 1040 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1062 1041 # opcode has fast paths when the value to be unpacked is a tuple or
1063 1042 # a list, but falls back to creating a full-fledged iterator in
1064 1043 # general. That is much slower than simply accessing and storing the
1065 1044 # tuple members one by one.
1066 1045 t = dget(fn)
1067 1046 state = t[0]
1068 1047 mode = t[1]
1069 1048 size = t[2]
1070 1049 time = t[3]
1071 1050
1072 1051 if not st and state in "nma":
1073 1052 dadd(fn)
1074 1053 elif state == 'n':
1075 1054 if (size >= 0 and
1076 1055 ((size != st.st_size and size != st.st_size & _rangemask)
1077 1056 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1078 1057 or size == -2 # other parent
1079 1058 or fn in copymap):
1080 1059 madd(fn)
1081 1060 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1082 1061 ladd(fn)
1083 1062 elif st.st_mtime == lastnormaltime:
1084 1063 # fn may have just been marked as normal and it may have
1085 1064 # changed in the same second without changing its size.
1086 1065 # This can happen if we quickly do multiple commits.
1087 1066 # Force lookup, so we don't miss such a racy file change.
1088 1067 ladd(fn)
1089 1068 elif listclean:
1090 1069 cadd(fn)
1091 1070 elif state == 'm':
1092 1071 madd(fn)
1093 1072 elif state == 'a':
1094 1073 aadd(fn)
1095 1074 elif state == 'r':
1096 1075 radd(fn)
1097 1076
1098 1077 return (lookup, scmutil.status(modified, added, removed, deleted,
1099 1078 unknown, ignored, clean))
1100 1079
1101 1080 def matches(self, match):
1102 1081 '''
1103 1082 return files in the dirstate (in whatever state) filtered by match
1104 1083 '''
1105 1084 dmap = self._map
1106 1085 if match.always():
1107 1086 return dmap.keys()
1108 1087 files = match.files()
1109 1088 if match.isexact():
1110 1089 # fast path -- filter the other way around, since typically files is
1111 1090 # much smaller than dmap
1112 1091 return [f for f in files if f in dmap]
1113 1092 if match.prefix() and all(fn in dmap for fn in files):
1114 1093 # fast path -- all the values are known to be files, so just return
1115 1094 # that
1116 1095 return list(files)
1117 1096 return [f for f in dmap if match(f)]
1118 1097
1119 1098 def _actualfilename(self, tr):
1120 1099 if tr:
1121 1100 return self._pendingfilename
1122 1101 else:
1123 1102 return self._filename
1124 1103
1125 1104 def savebackup(self, tr, backupname):
1126 1105 '''Save current dirstate into backup file'''
1127 1106 filename = self._actualfilename(tr)
1128 1107 assert backupname != filename
1129 1108
1130 1109 # use '_writedirstate' instead of 'write' to ensure the changes are written,
1131 1110 # because the latter skips writing out while a transaction is running.
1132 1111 # The output file will be used to create a backup of the dirstate at this point.
1133 1112 if self._dirty or not self._opener.exists(filename):
1134 1113 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1135 1114 checkambig=True))
1136 1115
1137 1116 if tr:
1138 1117 # ensure that subsequent tr.writepending returns True for
1139 1118 # changes written out above, even if dirstate is never
1140 1119 # changed after this
1141 1120 tr.addfilegenerator('dirstate', (self._filename,),
1142 1121 self._writedirstate, location='plain')
1143 1122
1144 1123 # ensure that pending file written above is unlinked at
1145 1124 # failure, even if tr.writepending isn't invoked until the
1146 1125 # end of this transaction
1147 1126 tr.registertmp(filename, location='plain')
1148 1127
1149 1128 self._opener.tryunlink(backupname)
1150 1129 # hardlink backup is okay because _writedirstate is always called
1151 1130 # with an "atomictemp=True" file.
1152 1131 util.copyfile(self._opener.join(filename),
1153 1132 self._opener.join(backupname), hardlink=True)
1154 1133
1155 1134 def restorebackup(self, tr, backupname):
1156 1135 '''Restore dirstate by backup file'''
1157 1136 # this "invalidate()" prevents "wlock.release()" from writing
1158 1137 # changes of dirstate out after restoring from backup file
1159 1138 self.invalidate()
1160 1139 filename = self._actualfilename(tr)
1161 1140 o = self._opener
1162 1141 if util.samefile(o.join(backupname), o.join(filename)):
1163 1142 o.unlink(backupname)
1164 1143 else:
1165 1144 o.rename(backupname, filename, checkambig=True)
1166 1145
1167 1146 def clearbackup(self, tr, backupname):
1168 1147 '''Clear backup file'''
1169 1148 self._opener.unlink(backupname)
1170 1149
1171 1150 class dirstatemap(object):
1172 1151 """Map encapsulating the dirstate's contents.
1173 1152
1174 1153 The dirstate contains the following state:
1175 1154
1176 1155 - `identity` is the identity of the dirstate file, which can be used to
1177 1156 detect when changes have occurred to the dirstate file.
1178 1157
1179 1158 - `parents` is a pair containing the parents of the working copy. The
1180 1159 parents are updated by calling `setparents`.
1181 1160
1182 1161 - the state map maps filenames to tuples of (state, mode, size, mtime),
1183 1162 where state is a single character representing 'normal', 'added',
1184 1163 'removed', or 'merged'. It is read by treating the dirstate as a
1185 1164 dict. File state is updated by calling the `addfile`, `removefile` and
1186 1165 `dropfile` methods.
1187 1166
1188 1167 - `copymap` maps destination filenames to their source filename.
1189 1168
1190 1169 The dirstate also provides the following views onto the state:
1191 1170
1192 1171 - `nonnormalset` is a set of the filenames that have state other
1193 1172 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1194 1173
1195 1174 - `otherparentset` is a set of the filenames that are marked as coming
1196 1175 from the second parent when the dirstate is currently being merged.
1197 1176
1198 1177 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1199 1178 form that they appear as in the dirstate.
1200 1179
1201 1180 - `dirfoldmap` is a dict mapping normalized directory names to the
1202 1181 denormalized form that they appear as in the dirstate.
1203 1182 """
1204 1183
1205 1184 def __init__(self, ui, opener, root):
1206 1185 self._ui = ui
1207 1186 self._opener = opener
1208 1187 self._root = root
1209 1188 self._filename = 'dirstate'
1210 1189
1211 1190 self._parents = None
1212 1191 self._dirtyparents = False
1213 1192
1214 1193 # for consistent view between _pl() and _read() invocations
1215 1194 self._pendingmode = None
1216 1195
1217 1196 @propertycache
1218 1197 def _map(self):
1219 1198 self._map = {}
1220 1199 self.read()
1221 1200 return self._map
1222 1201
1223 1202 @propertycache
1224 1203 def copymap(self):
1225 1204 self.copymap = {}
1226 1205 self._map
1227 1206 return self.copymap
1228 1207
1229 1208 def clear(self):
1230 1209 self._map.clear()
1231 1210 self.copymap.clear()
1232 1211 self.setparents(nullid, nullid)
1233 1212 util.clearcachedproperty(self, "_dirs")
1234 1213 util.clearcachedproperty(self, "_alldirs")
1235 1214 util.clearcachedproperty(self, "filefoldmap")
1236 1215 util.clearcachedproperty(self, "dirfoldmap")
1237 1216 util.clearcachedproperty(self, "nonnormalset")
1238 1217 util.clearcachedproperty(self, "otherparentset")
1239 1218
1240 1219 def items(self):
1241 1220 return self._map.iteritems()
1242 1221
1243 1222 # forward for python2,3 compat
1244 1223 iteritems = items
1245 1224
1246 1225 def __len__(self):
1247 1226 return len(self._map)
1248 1227
1249 1228 def __iter__(self):
1250 1229 return iter(self._map)
1251 1230
1252 1231 def get(self, key, default=None):
1253 1232 return self._map.get(key, default)
1254 1233
1255 1234 def __contains__(self, key):
1256 1235 return key in self._map
1257 1236
1258 1237 def __getitem__(self, key):
1259 1238 return self._map[key]
1260 1239
1261 1240 def keys(self):
1262 1241 return self._map.keys()
1263 1242
1264 1243 def preload(self):
1265 1244 """Loads the underlying data, if it's not already loaded"""
1266 1245 self._map
1267 1246
1268 1247 def addfile(self, f, oldstate, state, mode, size, mtime):
1269 1248 """Add a tracked file to the dirstate."""
1270 1249 if oldstate in "?r" and r"_dirs" in self.__dict__:
1271 1250 self._dirs.addpath(f)
1272 1251 if oldstate == "?" and r"_alldirs" in self.__dict__:
1273 1252 self._alldirs.addpath(f)
1274 1253 self._map[f] = dirstatetuple(state, mode, size, mtime)
1275 1254 if state != 'n' or mtime == -1:
1276 1255 self.nonnormalset.add(f)
1277 1256 if size == -2:
1278 1257 self.otherparentset.add(f)
1279 1258
1280 1259 def removefile(self, f, oldstate, size):
1281 1260 """
1282 1261 Mark a file as removed in the dirstate.
1283 1262
1284 1263 The `size` parameter is used to store sentinel values that indicate
1285 1264 the file's previous state. In the future, we should refactor this
1286 1265 to be more explicit about what that state is.
1287 1266 """
1288 1267 if oldstate not in "?r" and r"_dirs" in self.__dict__:
1289 1268 self._dirs.delpath(f)
1290 1269 if oldstate == "?" and r"_alldirs" in self.__dict__:
1291 1270 self._alldirs.addpath(f)
1292 1271 if r"filefoldmap" in self.__dict__:
1293 1272 normed = util.normcase(f)
1294 1273 self.filefoldmap.pop(normed, None)
1295 1274 self._map[f] = dirstatetuple('r', 0, size, 0)
1296 1275 self.nonnormalset.add(f)
1297 1276
1298 1277 def dropfile(self, f, oldstate):
1299 1278 """
1300 1279 Remove a file from the dirstate. Returns True if the file was
1301 1280 previously recorded.
1302 1281 """
1303 1282 exists = self._map.pop(f, None) is not None
1304 1283 if exists:
1305 1284 if oldstate != "r" and r"_dirs" in self.__dict__:
1306 1285 self._dirs.delpath(f)
1307 1286 if r"_alldirs" in self.__dict__:
1308 1287 self._alldirs.delpath(f)
1309 1288 if r"filefoldmap" in self.__dict__:
1310 1289 normed = util.normcase(f)
1311 1290 self.filefoldmap.pop(normed, None)
1312 1291 self.nonnormalset.discard(f)
1313 1292 return exists
1314 1293
1315 1294 def clearambiguoustimes(self, files, now):
1316 1295 for f in files:
1317 1296 e = self.get(f)
1318 1297 if e is not None and e[0] == 'n' and e[3] == now:
1319 1298 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1320 1299 self.nonnormalset.add(f)
1321 1300
1322 1301 def nonnormalentries(self):
1323 1302 '''Compute the nonnormal dirstate entries from the dmap'''
1324 1303 try:
1325 1304 return parsers.nonnormalotherparententries(self._map)
1326 1305 except AttributeError:
1327 1306 nonnorm = set()
1328 1307 otherparent = set()
1329 1308 for fname, e in self._map.iteritems():
1330 1309 if e[0] != 'n' or e[3] == -1:
1331 1310 nonnorm.add(fname)
1332 1311 if e[0] == 'n' and e[2] == -2:
1333 1312 otherparent.add(fname)
1334 1313 return nonnorm, otherparent
1335 1314
1336 1315 @propertycache
1337 1316 def filefoldmap(self):
1338 1317 """Returns a dictionary mapping normalized case paths to their
1339 1318 non-normalized versions.
1340 1319 """
1341 1320 try:
1342 1321 makefilefoldmap = parsers.make_file_foldmap
1343 1322 except AttributeError:
1344 1323 pass
1345 1324 else:
1346 1325 return makefilefoldmap(self._map, util.normcasespec,
1347 1326 util.normcasefallback)
1348 1327
1349 1328 f = {}
1350 1329 normcase = util.normcase
1351 1330 for name, s in self._map.iteritems():
1352 1331 if s[0] != 'r':
1353 1332 f[normcase(name)] = name
1354 1333 f['.'] = '.' # prevents useless util.fspath() invocation
1355 1334 return f
1356 1335
1357 1336 def hastrackeddir(self, d):
1358 1337 """
1359 1338 Returns True if the dirstate contains a tracked (not removed) file
1360 1339 in this directory.
1361 1340 """
1362 1341 return d in self._dirs
1363 1342
1364 1343 def hasdir(self, d):
1365 1344 """
1366 1345 Returns True if the dirstate contains a file (tracked or removed)
1367 1346 in this directory.
1368 1347 """
1369 1348 return d in self._alldirs
1370 1349
1371 1350 @propertycache
1372 1351 def _dirs(self):
1373 1352 return util.dirs(self._map, 'r')
1374 1353
1375 1354 @propertycache
1376 1355 def _alldirs(self):
1377 1356 return util.dirs(self._map)
1378 1357
1379 1358 def _opendirstatefile(self):
1380 1359 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1381 1360 if self._pendingmode is not None and self._pendingmode != mode:
1382 1361 fp.close()
1383 1362 raise error.Abort(_('working directory state may be '
1384 1363 'changed parallelly'))
1385 1364 self._pendingmode = mode
1386 1365 return fp
1387 1366
1388 1367 def parents(self):
1389 1368 if not self._parents:
1390 1369 try:
1391 1370 fp = self._opendirstatefile()
1392 1371 st = fp.read(40)
1393 1372 fp.close()
1394 1373 except IOError as err:
1395 1374 if err.errno != errno.ENOENT:
1396 1375 raise
1397 1376 # File doesn't exist, so the current state is empty
1398 1377 st = ''
1399 1378
1400 1379 l = len(st)
1401 1380 if l == 40:
1402 1381 self._parents = st[:20], st[20:40]
1403 1382 elif l == 0:
1404 1383 self._parents = [nullid, nullid]
1405 1384 else:
1406 1385 raise error.Abort(_('working directory state appears '
1407 1386 'damaged!'))
1408 1387
1409 1388 return self._parents
1410 1389
1411 1390 def setparents(self, p1, p2):
1412 1391 self._parents = (p1, p2)
1413 1392 self._dirtyparents = True
1414 1393
1415 1394 def read(self):
1416 1395 # ignore HG_PENDING because identity is used only for writing
1417 1396 self.identity = util.filestat.frompath(
1418 1397 self._opener.join(self._filename))
1419 1398
1420 1399 try:
1421 1400 fp = self._opendirstatefile()
1422 1401 try:
1423 1402 st = fp.read()
1424 1403 finally:
1425 1404 fp.close()
1426 1405 except IOError as err:
1427 1406 if err.errno != errno.ENOENT:
1428 1407 raise
1429 1408 return
1430 1409 if not st:
1431 1410 return
1432 1411
1433 1412 if util.safehasattr(parsers, 'dict_new_presized'):
1434 1413 # Make an estimate of the number of files in the dirstate based on
1435 1414 # its size. From a linear regression on a set of real-world repos,
1436 1415 # all over 10,000 files, the size of a dirstate entry is 85
1437 1416 # bytes. The cost of resizing is significantly higher than the cost
1438 1417 # of filling in a larger presized dict, so subtract 20% from the
1439 1418 # size.
1440 1419 #
1441 1420 # This heuristic is imperfect in many ways, so in a future dirstate
1442 1421 # format update it makes sense to just record the number of entries
1443 1422 # on write.
1444 1423 self._map = parsers.dict_new_presized(len(st) / 71)
1445 1424
1446 1425 # Python's garbage collector triggers a GC each time a certain number
1447 1426 # of container objects (the number being defined by
1448 1427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1449 1428 # for each file in the dirstate. The C version then immediately marks
1450 1429 # them as not to be tracked by the collector. However, this has no
1451 1430 # effect on when GCs are triggered, only on what objects the GC looks
1452 1431 # into. This means that O(number of files) GCs are unavoidable.
1453 1432 # Depending on when in the process's lifetime the dirstate is parsed,
1454 1433 # this can get very expensive. As a workaround, disable GC while
1455 1434 # parsing the dirstate.
1456 1435 #
1457 1436 # (we cannot decorate the function directly since it is in a C module)
1458 1437 parse_dirstate = util.nogc(parsers.parse_dirstate)
1459 1438 p = parse_dirstate(self._map, self.copymap, st)
1460 1439 if not self._dirtyparents:
1461 1440 self.setparents(*p)
1462 1441
1463 1442 # Avoid excess attribute lookups by fast pathing certain checks
1464 1443 self.__contains__ = self._map.__contains__
1465 1444 self.__getitem__ = self._map.__getitem__
1466 1445 self.get = self._map.get
1467 1446
1468 1447 def write(self, st, now):
1469 1448 st.write(parsers.pack_dirstate(self._map, self.copymap,
1470 1449 self.parents(), now))
1471 1450 st.close()
1472 1451 self._dirtyparents = False
1473 1452 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1474 1453
1475 1454 @propertycache
1476 1455 def nonnormalset(self):
1477 1456 nonnorm, otherparents = self.nonnormalentries()
1478 1457 self.otherparentset = otherparents
1479 1458 return nonnorm
1480 1459
1481 1460 @propertycache
1482 1461 def otherparentset(self):
1483 1462 nonnorm, otherparents = self.nonnormalentries()
1484 1463 self.nonnormalset = nonnorm
1485 1464 return otherparents
1486 1465
1487 1466 @propertycache
1488 1467 def identity(self):
1489 1468 self._map
1490 1469 return self.identity
1491 1470
1492 1471 @propertycache
1493 1472 def dirfoldmap(self):
1494 1473 f = {}
1495 1474 normcase = util.normcase
1496 1475 for name in self._dirs:
1497 1476 f[normcase(name)] = name
1498 1477 return f
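As a quick reference for the state letters documented in dirstate.__getitem__ above, a small query sketch; `repo` and `f` are assumed names for an open localrepo instance and a repository-relative filename:

state = repo.dirstate[f]               # 'n', 'a', 'r', 'm', or '?' (untracked)
if f in repo.dirstate:                 # membership check via __contains__
    source = repo.dirstate.copied(f)   # copy source recorded for f, or None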