dirstate: add callback to notify extensions about wd parent change...
Mateusz Kwapich
r29772:2ebd507e default
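This change adds an addparentchangecallback() API so extensions can be notified when the working directory parents move: _writedirstate() compares the parents recorded at the start of the change (_origpl) with the current ones and invokes each registered callback with (dirstate, oldparents, newparents). A minimal sketch of a consuming extension is shown below; the extension name 'myext', the debug message, and registering from reposetup() are illustrative assumptions, not part of this changeset.

    # hypothetical extension module registering for parent-change notifications
    def reposetup(ui, repo):
        def _parentschanged(dirstate, oldparents, newparents):
            # oldparents and newparents are (p1, p2) tuples of binary node ids
            ui.debug('wd parents moved from %r to %r\n'
                     % (oldparents, newparents))
        # 'myext' is the category key; registering again under the same key
        # replaces any callback previously installed by this extension
        repo.dirstate.addparentchangecallback('myext', _parentschanged)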
@@ -1,1241 +1,1264 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 match as matchmod,
21 21 osutil,
22 22 parsers,
23 23 pathutil,
24 24 scmutil,
25 25 util,
26 26 )
27 27
28 28 propertycache = util.propertycache
29 29 filecache = scmutil.filecache
30 30 _rangemask = 0x7fffffff
31 31
32 32 dirstatetuple = parsers.dirstatetuple
33 33
34 34 class repocache(filecache):
35 35 """filecache for files in .hg/"""
36 36 def join(self, obj, fname):
37 37 return obj._opener.join(fname)
38 38
39 39 class rootcache(filecache):
40 40 """filecache for files in the repository root"""
41 41 def join(self, obj, fname):
42 42 return obj._join(fname)
43 43
44 44 def _getfsnow(vfs):
45 45 '''Get "now" timestamp on filesystem'''
46 46 tmpfd, tmpname = vfs.mkstemp()
47 47 try:
48 48 return os.fstat(tmpfd).st_mtime
49 49 finally:
50 50 os.close(tmpfd)
51 51 vfs.unlink(tmpname)
52 52
53 53 def nonnormalentries(dmap):
54 54 '''Compute the nonnormal dirstate entries from the dmap'''
55 55 try:
56 56 return parsers.nonnormalentries(dmap)
57 57 except AttributeError:
58 58 return set(fname for fname, e in dmap.iteritems()
59 59 if e[0] != 'n' or e[3] == -1)
60 60
61 61 def _trypending(root, vfs, filename):
62 62 '''Open file to be read according to HG_PENDING environment variable
63 63
64 64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 65 is equal to 'root'.
66 66
67 67 This returns '(fp, is_pending_opened)' tuple.
68 68 '''
69 69 if root == os.environ.get('HG_PENDING'):
70 70 try:
71 71 return (vfs('%s.pending' % filename), True)
72 72 except IOError as inst:
73 73 if inst.errno != errno.ENOENT:
74 74 raise
75 75 return (vfs(filename), False)
76 76
77 77 class dirstate(object):
78 78
79 79 def __init__(self, opener, ui, root, validate):
80 80 '''Create a new dirstate object.
81 81
82 82 opener is an open()-like callable that can be used to open the
83 83 dirstate file; root is the root of the directory tracked by
84 84 the dirstate.
85 85 '''
86 86 self._opener = opener
87 87 self._validate = validate
88 88 self._root = root
89 89 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
90 90 # UNC path pointing to root share (issue4557)
91 91 self._rootdir = pathutil.normasprefix(root)
92 92 # internal config: ui.forcecwd
93 93 forcecwd = ui.config('ui', 'forcecwd')
94 94 if forcecwd:
95 95 self._cwd = forcecwd
96 96 self._dirty = False
97 97 self._dirtypl = False
98 98 self._lastnormaltime = 0
99 99 self._ui = ui
100 100 self._filecache = {}
101 101 self._parentwriters = 0
102 102 self._filename = 'dirstate'
103 103 self._pendingfilename = '%s.pending' % self._filename
104 self._plchangecallbacks = {}
105 self._origpl = None
104 106
105 107 # for consistent view between _pl() and _read() invocations
106 108 self._pendingmode = None
107 109
108 110 def beginparentchange(self):
109 111 '''Marks the beginning of a set of changes that involve changing
110 112 the dirstate parents. If there is an exception during this time,
111 113 the dirstate will not be written when the wlock is released. This
112 114 prevents writing an incoherent dirstate where the parent doesn't
113 115 match the contents.
114 116 '''
115 117 self._parentwriters += 1
116 118
117 119 def endparentchange(self):
118 120 '''Marks the end of a set of changes that involve changing the
119 121 dirstate parents. Once all parent changes have been marked done,
120 122 the wlock will be free to write the dirstate on release.
121 123 '''
122 124 if self._parentwriters > 0:
123 125 self._parentwriters -= 1
124 126
125 127 def pendingparentchange(self):
126 128 '''Returns true if the dirstate is in the middle of a set of changes
127 129 that modify the dirstate parent.
128 130 '''
129 131 return self._parentwriters > 0
130 132
131 133 @propertycache
132 134 def _map(self):
133 135 '''Return the dirstate contents as a map from filename to
134 136 (state, mode, size, time).'''
135 137 self._read()
136 138 return self._map
137 139
138 140 @propertycache
139 141 def _copymap(self):
140 142 self._read()
141 143 return self._copymap
142 144
143 145 @propertycache
144 146 def _nonnormalset(self):
145 147 return nonnormalentries(self._map)
146 148
147 149 @propertycache
148 150 def _filefoldmap(self):
149 151 try:
150 152 makefilefoldmap = parsers.make_file_foldmap
151 153 except AttributeError:
152 154 pass
153 155 else:
154 156 return makefilefoldmap(self._map, util.normcasespec,
155 157 util.normcasefallback)
156 158
157 159 f = {}
158 160 normcase = util.normcase
159 161 for name, s in self._map.iteritems():
160 162 if s[0] != 'r':
161 163 f[normcase(name)] = name
162 164 f['.'] = '.' # prevents useless util.fspath() invocation
163 165 return f
164 166
165 167 @propertycache
166 168 def _dirfoldmap(self):
167 169 f = {}
168 170 normcase = util.normcase
169 171 for name in self._dirs:
170 172 f[normcase(name)] = name
171 173 return f
172 174
173 175 @repocache('branch')
174 176 def _branch(self):
175 177 try:
176 178 return self._opener.read("branch").strip() or "default"
177 179 except IOError as inst:
178 180 if inst.errno != errno.ENOENT:
179 181 raise
180 182 return "default"
181 183
182 184 @propertycache
183 185 def _pl(self):
184 186 try:
185 187 fp = self._opendirstatefile()
186 188 st = fp.read(40)
187 189 fp.close()
188 190 l = len(st)
189 191 if l == 40:
190 192 return st[:20], st[20:40]
191 193 elif l > 0 and l < 40:
192 194 raise error.Abort(_('working directory state appears damaged!'))
193 195 except IOError as err:
194 196 if err.errno != errno.ENOENT:
195 197 raise
196 198 return [nullid, nullid]
197 199
198 200 @propertycache
199 201 def _dirs(self):
200 202 return util.dirs(self._map, 'r')
201 203
202 204 def dirs(self):
203 205 return self._dirs
204 206
205 207 @rootcache('.hgignore')
206 208 def _ignore(self):
207 209 files = self._ignorefiles()
208 210 if not files:
209 211 return util.never
210 212
211 213 pats = ['include:%s' % f for f in files]
212 214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
213 215
214 216 @propertycache
215 217 def _slash(self):
216 218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
217 219
218 220 @propertycache
219 221 def _checklink(self):
220 222 return util.checklink(self._root)
221 223
222 224 @propertycache
223 225 def _checkexec(self):
224 226 return util.checkexec(self._root)
225 227
226 228 @propertycache
227 229 def _checkcase(self):
228 230 return not util.checkcase(self._join('.hg'))
229 231
230 232 def _join(self, f):
231 233 # much faster than os.path.join()
232 234 # it's safe because f is always a relative path
233 235 return self._rootdir + f
234 236
235 237 def flagfunc(self, buildfallback):
236 238 if self._checklink and self._checkexec:
237 239 def f(x):
238 240 try:
239 241 st = os.lstat(self._join(x))
240 242 if util.statislink(st):
241 243 return 'l'
242 244 if util.statisexec(st):
243 245 return 'x'
244 246 except OSError:
245 247 pass
246 248 return ''
247 249 return f
248 250
249 251 fallback = buildfallback()
250 252 if self._checklink:
251 253 def f(x):
252 254 if os.path.islink(self._join(x)):
253 255 return 'l'
254 256 if 'x' in fallback(x):
255 257 return 'x'
256 258 return ''
257 259 return f
258 260 if self._checkexec:
259 261 def f(x):
260 262 if 'l' in fallback(x):
261 263 return 'l'
262 264 if util.isexec(self._join(x)):
263 265 return 'x'
264 266 return ''
265 267 return f
266 268 else:
267 269 return fallback
268 270
269 271 @propertycache
270 272 def _cwd(self):
271 273 return os.getcwd()
272 274
273 275 def getcwd(self):
274 276 '''Return the path from which a canonical path is calculated.
275 277
276 278 This path should be used to resolve file patterns or to convert
277 279 canonical paths back to file paths for display. It shouldn't be
278 280 used to get real file paths. Use vfs functions instead.
279 281 '''
280 282 cwd = self._cwd
281 283 if cwd == self._root:
282 284 return ''
283 285 # self._root ends with a path separator if self._root is '/' or 'C:\'
284 286 rootsep = self._root
285 287 if not util.endswithsep(rootsep):
286 288 rootsep += os.sep
287 289 if cwd.startswith(rootsep):
288 290 return cwd[len(rootsep):]
289 291 else:
290 292 # we're outside the repo. return an absolute path.
291 293 return cwd
292 294
293 295 def pathto(self, f, cwd=None):
294 296 if cwd is None:
295 297 cwd = self.getcwd()
296 298 path = util.pathto(self._root, cwd, f)
297 299 if self._slash:
298 300 return util.pconvert(path)
299 301 return path
300 302
301 303 def __getitem__(self, key):
302 304 '''Return the current state of key (a filename) in the dirstate.
303 305
304 306 States are:
305 307 n normal
306 308 m needs merging
307 309 r marked for removal
308 310 a marked for addition
309 311 ? not tracked
310 312 '''
311 313 return self._map.get(key, ("?",))[0]
312 314
313 315 def __contains__(self, key):
314 316 return key in self._map
315 317
316 318 def __iter__(self):
317 319 for x in sorted(self._map):
318 320 yield x
319 321
320 322 def iteritems(self):
321 323 return self._map.iteritems()
322 324
323 325 def parents(self):
324 326 return [self._validate(p) for p in self._pl]
325 327
326 328 def p1(self):
327 329 return self._validate(self._pl[0])
328 330
329 331 def p2(self):
330 332 return self._validate(self._pl[1])
331 333
332 334 def branch(self):
333 335 return encoding.tolocal(self._branch)
334 336
335 337 def setparents(self, p1, p2=nullid):
336 338 """Set dirstate parents to p1 and p2.
337 339
338 340 When moving from two parents to one, 'm' merged entries are
339 341 adjusted to normal and previous copy records discarded and
340 342 returned by the call.
341 343
342 344 See localrepo.setparents()
343 345 """
344 346 if self._parentwriters == 0:
345 347 raise ValueError("cannot set dirstate parent without "
346 348 "calling dirstate.beginparentchange")
347 349
348 350 self._dirty = self._dirtypl = True
349 351 oldp2 = self._pl[1]
352 if self._origpl is None:
353 self._origpl = self._pl
350 354 self._pl = p1, p2
351 355 copies = {}
352 356 if oldp2 != nullid and p2 == nullid:
353 357 for f, s in self._map.iteritems():
354 358 # Discard 'm' markers when moving away from a merge state
355 359 if s[0] == 'm':
356 360 if f in self._copymap:
357 361 copies[f] = self._copymap[f]
358 362 self.normallookup(f)
359 363 # Also fix up otherparent markers
360 364 elif s[0] == 'n' and s[2] == -2:
361 365 if f in self._copymap:
362 366 copies[f] = self._copymap[f]
363 367 self.add(f)
364 368 return copies
365 369
366 370 def setbranch(self, branch):
367 371 self._branch = encoding.fromlocal(branch)
368 372 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
369 373 try:
370 374 f.write(self._branch + '\n')
371 375 f.close()
372 376
373 377 # make sure filecache has the correct stat info for _branch after
374 378 # replacing the underlying file
375 379 ce = self._filecache['_branch']
376 380 if ce:
377 381 ce.refresh()
378 382 except: # re-raises
379 383 f.discard()
380 384 raise
381 385
382 386 def _opendirstatefile(self):
383 387 fp, mode = _trypending(self._root, self._opener, self._filename)
384 388 if self._pendingmode is not None and self._pendingmode != mode:
385 389 fp.close()
386 390 raise error.Abort(_('working directory state may be '
387 391 'changed parallelly'))
388 392 self._pendingmode = mode
389 393 return fp
390 394
391 395 def _read(self):
392 396 self._map = {}
393 397 self._copymap = {}
394 398 try:
395 399 fp = self._opendirstatefile()
396 400 try:
397 401 st = fp.read()
398 402 finally:
399 403 fp.close()
400 404 except IOError as err:
401 405 if err.errno != errno.ENOENT:
402 406 raise
403 407 return
404 408 if not st:
405 409 return
406 410
407 411 if util.safehasattr(parsers, 'dict_new_presized'):
408 412 # Make an estimate of the number of files in the dirstate based on
409 413 # its size. From a linear regression on a set of real-world repos,
410 414 # all over 10,000 files, the size of a dirstate entry is 85
411 415 # bytes. The cost of resizing is significantly higher than the cost
412 416 # of filling in a larger presized dict, so subtract 20% from the
413 417 # size.
414 418 #
415 419 # This heuristic is imperfect in many ways, so in a future dirstate
416 420 # format update it makes sense to just record the number of entries
417 421 # on write.
418 422 self._map = parsers.dict_new_presized(len(st) / 71)
419 423
420 424 # Python's garbage collector triggers a GC each time a certain number
421 425 # of container objects (the number being defined by
422 426 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
423 427 # for each file in the dirstate. The C version then immediately marks
424 428 # them as not to be tracked by the collector. However, this has no
425 429 # effect on when GCs are triggered, only on what objects the GC looks
426 430 # into. This means that O(number of files) GCs are unavoidable.
427 431 # Depending on when in the process's lifetime the dirstate is parsed,
428 432 # this can get very expensive. As a workaround, disable GC while
429 433 # parsing the dirstate.
430 434 #
431 435 # (we cannot decorate the function directly since it is in a C module)
432 436 parse_dirstate = util.nogc(parsers.parse_dirstate)
433 437 p = parse_dirstate(self._map, self._copymap, st)
434 438 if not self._dirtypl:
435 439 self._pl = p
436 440
437 441 def invalidate(self):
438 442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
439 443 "_pl", "_dirs", "_ignore", "_nonnormalset"):
440 444 if a in self.__dict__:
441 445 delattr(self, a)
442 446 self._lastnormaltime = 0
443 447 self._dirty = False
444 448 self._parentwriters = 0
449 self._origpl = None
445 450
446 451 def copy(self, source, dest):
447 452 """Mark dest as a copy of source. Unmark dest if source is None."""
448 453 if source == dest:
449 454 return
450 455 self._dirty = True
451 456 if source is not None:
452 457 self._copymap[dest] = source
453 458 elif dest in self._copymap:
454 459 del self._copymap[dest]
455 460
456 461 def copied(self, file):
457 462 return self._copymap.get(file, None)
458 463
459 464 def copies(self):
460 465 return self._copymap
461 466
462 467 def _droppath(self, f):
463 468 if self[f] not in "?r" and "_dirs" in self.__dict__:
464 469 self._dirs.delpath(f)
465 470
466 471 if "_filefoldmap" in self.__dict__:
467 472 normed = util.normcase(f)
468 473 if normed in self._filefoldmap:
469 474 del self._filefoldmap[normed]
470 475
471 476 def _addpath(self, f, state, mode, size, mtime):
472 477 oldstate = self[f]
473 478 if state == 'a' or oldstate == 'r':
474 479 scmutil.checkfilename(f)
475 480 if f in self._dirs:
476 481 raise error.Abort(_('directory %r already in dirstate') % f)
477 482 # shadows
478 483 for d in util.finddirs(f):
479 484 if d in self._dirs:
480 485 break
481 486 if d in self._map and self[d] != 'r':
482 487 raise error.Abort(
483 488 _('file %r in dirstate clashes with %r') % (d, f))
484 489 if oldstate in "?r" and "_dirs" in self.__dict__:
485 490 self._dirs.addpath(f)
486 491 self._dirty = True
487 492 self._map[f] = dirstatetuple(state, mode, size, mtime)
488 493 if state != 'n' or mtime == -1:
489 494 self._nonnormalset.add(f)
490 495
491 496 def normal(self, f):
492 497 '''Mark a file normal and clean.'''
493 498 s = os.lstat(self._join(f))
494 499 mtime = s.st_mtime
495 500 self._addpath(f, 'n', s.st_mode,
496 501 s.st_size & _rangemask, mtime & _rangemask)
497 502 if f in self._copymap:
498 503 del self._copymap[f]
499 504 if f in self._nonnormalset:
500 505 self._nonnormalset.remove(f)
501 506 if mtime > self._lastnormaltime:
502 507 # Remember the most recent modification timeslot for status(),
503 508 # to make sure we won't miss future size-preserving file content
504 509 # modifications that happen within the same timeslot.
505 510 self._lastnormaltime = mtime
506 511
507 512 def normallookup(self, f):
508 513 '''Mark a file normal, but possibly dirty.'''
509 514 if self._pl[1] != nullid and f in self._map:
510 515 # if there is a merge going on and the file was either
511 516 # in state 'm' (-1) or coming from other parent (-2) before
512 517 # being removed, restore that state.
513 518 entry = self._map[f]
514 519 if entry[0] == 'r' and entry[2] in (-1, -2):
515 520 source = self._copymap.get(f)
516 521 if entry[2] == -1:
517 522 self.merge(f)
518 523 elif entry[2] == -2:
519 524 self.otherparent(f)
520 525 if source:
521 526 self.copy(source, f)
522 527 return
523 528 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
524 529 return
525 530 self._addpath(f, 'n', 0, -1, -1)
526 531 if f in self._copymap:
527 532 del self._copymap[f]
528 533 if f in self._nonnormalset:
529 534 self._nonnormalset.remove(f)
530 535
531 536 def otherparent(self, f):
532 537 '''Mark as coming from the other parent, always dirty.'''
533 538 if self._pl[1] == nullid:
534 539 raise error.Abort(_("setting %r to other parent "
535 540 "only allowed in merges") % f)
536 541 if f in self and self[f] == 'n':
537 542 # merge-like
538 543 self._addpath(f, 'm', 0, -2, -1)
539 544 else:
540 545 # add-like
541 546 self._addpath(f, 'n', 0, -2, -1)
542 547
543 548 if f in self._copymap:
544 549 del self._copymap[f]
545 550
546 551 def add(self, f):
547 552 '''Mark a file added.'''
548 553 self._addpath(f, 'a', 0, -1, -1)
549 554 if f in self._copymap:
550 555 del self._copymap[f]
551 556
552 557 def remove(self, f):
553 558 '''Mark a file removed.'''
554 559 self._dirty = True
555 560 self._droppath(f)
556 561 size = 0
557 562 if self._pl[1] != nullid and f in self._map:
558 563 # backup the previous state
559 564 entry = self._map[f]
560 565 if entry[0] == 'm': # merge
561 566 size = -1
562 567 elif entry[0] == 'n' and entry[2] == -2: # other parent
563 568 size = -2
564 569 self._map[f] = dirstatetuple('r', 0, size, 0)
565 570 self._nonnormalset.add(f)
566 571 if size == 0 and f in self._copymap:
567 572 del self._copymap[f]
568 573
569 574 def merge(self, f):
570 575 '''Mark a file merged.'''
571 576 if self._pl[1] == nullid:
572 577 return self.normallookup(f)
573 578 return self.otherparent(f)
574 579
575 580 def drop(self, f):
576 581 '''Drop a file from the dirstate'''
577 582 if f in self._map:
578 583 self._dirty = True
579 584 self._droppath(f)
580 585 del self._map[f]
581 586 if f in self._nonnormalset:
582 587 self._nonnormalset.remove(f)
583 588 if f in self._copymap:
584 589 del self._copymap[f]
585 590
586 591 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
587 592 if exists is None:
588 593 exists = os.path.lexists(os.path.join(self._root, path))
589 594 if not exists:
590 595 # Maybe a path component exists
591 596 if not ignoremissing and '/' in path:
592 597 d, f = path.rsplit('/', 1)
593 598 d = self._normalize(d, False, ignoremissing, None)
594 599 folded = d + "/" + f
595 600 else:
596 601 # No path components, preserve original case
597 602 folded = path
598 603 else:
599 604 # recursively normalize leading directory components
600 605 # against dirstate
601 606 if '/' in normed:
602 607 d, f = normed.rsplit('/', 1)
603 608 d = self._normalize(d, False, ignoremissing, True)
604 609 r = self._root + "/" + d
605 610 folded = d + "/" + util.fspath(f, r)
606 611 else:
607 612 folded = util.fspath(normed, self._root)
608 613 storemap[normed] = folded
609 614
610 615 return folded
611 616
612 617 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
613 618 normed = util.normcase(path)
614 619 folded = self._filefoldmap.get(normed, None)
615 620 if folded is None:
616 621 if isknown:
617 622 folded = path
618 623 else:
619 624 folded = self._discoverpath(path, normed, ignoremissing, exists,
620 625 self._filefoldmap)
621 626 return folded
622 627
623 628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
624 629 normed = util.normcase(path)
625 630 folded = self._filefoldmap.get(normed, None)
626 631 if folded is None:
627 632 folded = self._dirfoldmap.get(normed, None)
628 633 if folded is None:
629 634 if isknown:
630 635 folded = path
631 636 else:
632 637 # store discovered result in dirfoldmap so that future
633 638 # normalizefile calls don't start matching directories
634 639 folded = self._discoverpath(path, normed, ignoremissing, exists,
635 640 self._dirfoldmap)
636 641 return folded
637 642
638 643 def normalize(self, path, isknown=False, ignoremissing=False):
639 644 '''
640 645 normalize the case of a pathname when on a casefolding filesystem
641 646
642 647 isknown specifies whether the filename came from walking the
643 648 disk, to avoid extra filesystem access.
644 649
645 650 If ignoremissing is True, missing paths are returned
646 651 unchanged. Otherwise, we try harder to normalize possibly
647 652 existing path components.
648 653
649 654 The normalized case is determined based on the following precedence:
650 655
651 656 - version of name already stored in the dirstate
652 657 - version of name stored on disk
653 658 - version provided via command arguments
654 659 '''
655 660
656 661 if self._checkcase:
657 662 return self._normalize(path, isknown, ignoremissing)
658 663 return path
659 664
660 665 def clear(self):
661 666 self._map = {}
662 667 self._nonnormalset = set()
663 668 if "_dirs" in self.__dict__:
664 669 delattr(self, "_dirs")
665 670 self._copymap = {}
666 671 self._pl = [nullid, nullid]
667 672 self._lastnormaltime = 0
668 673 self._dirty = True
669 674
670 675 def rebuild(self, parent, allfiles, changedfiles=None):
671 676 if changedfiles is None:
672 677 # Rebuild entire dirstate
673 678 changedfiles = allfiles
674 679 lastnormaltime = self._lastnormaltime
675 680 self.clear()
676 681 self._lastnormaltime = lastnormaltime
677 682
678 683 for f in changedfiles:
679 684 mode = 0o666
680 685 if f in allfiles and 'x' in allfiles.flags(f):
681 686 mode = 0o777
682 687
683 688 if f in allfiles:
684 689 self._map[f] = dirstatetuple('n', mode, -1, 0)
685 690 else:
686 691 self._map.pop(f, None)
687 692 if f in self._nonnormalset:
688 693 self._nonnormalset.remove(f)
689 694
695 if self._origpl is None:
696 self._origpl = self._pl
690 697 self._pl = (parent, nullid)
691 698 self._dirty = True
692 699
693 700 def write(self, tr):
694 701 if not self._dirty:
695 702 return
696 703
697 704 filename = self._filename
698 705 if tr:
699 706 # 'dirstate.write()' is not only for writing in-memory
700 707 # changes out, but also for dropping ambiguous timestamp.
701 708 # delayed writing re-raise "ambiguous timestamp issue".
702 709 # See also the wiki page below for detail:
703 710 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
704 711
705 712 # emulate dropping timestamp in 'parsers.pack_dirstate'
706 713 now = _getfsnow(self._opener)
707 714 dmap = self._map
708 715 for f, e in dmap.iteritems():
709 716 if e[0] == 'n' and e[3] == now:
710 717 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
711 718 self._nonnormalset.add(f)
712 719
713 720 # emulate that all 'dirstate.normal' results are written out
714 721 self._lastnormaltime = 0
715 722
716 723 # delay writing in-memory changes out
717 724 tr.addfilegenerator('dirstate', (self._filename,),
718 725 self._writedirstate, location='plain')
719 726 return
720 727
721 728 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
722 729 self._writedirstate(st)
723 730
731 def addparentchangecallback(self, category, callback):
732 """add a callback to be called when the wd parents are changed
733
734 Callback will be called with the following arguments:
735 dirstate, (oldp1, oldp2), (newp1, newp2)
736
737 Category is a unique identifier to allow overwriting an old callback
738 with a newer callback.
739 """
740 self._plchangecallbacks[category] = callback
741
724 742 def _writedirstate(self, st):
743 # notify callbacks about parents change
744 if self._origpl is not None and self._origpl != self._pl:
745 for c, callback in sorted(self._plchangecallbacks.iteritems()):
746 callback(self, self._origpl, self._pl)
747 self._origpl = None
725 748 # use the modification time of the newly created temporary file as the
726 749 # filesystem's notion of 'now'
727 750 now = util.fstat(st).st_mtime & _rangemask
728 751
729 752 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
730 753 # timestamp of each entry in dirstate, because of 'now > mtime'
731 754 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
732 755 if delaywrite > 0:
733 756 # do we have any files to delay for?
734 757 for f, e in self._map.iteritems():
735 758 if e[0] == 'n' and e[3] == now:
736 759 import time # to avoid useless import
737 760 # rather than sleep n seconds, sleep until the next
738 761 # multiple of n seconds
739 762 clock = time.time()
740 763 start = int(clock) - (int(clock) % delaywrite)
741 764 end = start + delaywrite
742 765 time.sleep(end - clock)
743 766 break
744 767
745 768 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
746 769 self._nonnormalset = nonnormalentries(self._map)
747 770 st.close()
748 771 self._lastnormaltime = 0
749 772 self._dirty = self._dirtypl = False
750 773
751 774 def _dirignore(self, f):
752 775 if f == '.':
753 776 return False
754 777 if self._ignore(f):
755 778 return True
756 779 for p in util.finddirs(f):
757 780 if self._ignore(p):
758 781 return True
759 782 return False
760 783
761 784 def _ignorefiles(self):
762 785 files = []
763 786 if os.path.exists(self._join('.hgignore')):
764 787 files.append(self._join('.hgignore'))
765 788 for name, path in self._ui.configitems("ui"):
766 789 if name == 'ignore' or name.startswith('ignore.'):
767 790 # we need to use os.path.join here rather than self._join
768 791 # because path is arbitrary and user-specified
769 792 files.append(os.path.join(self._rootdir, util.expandpath(path)))
770 793 return files
771 794
772 795 def _ignorefileandline(self, f):
773 796 files = collections.deque(self._ignorefiles())
774 797 visited = set()
775 798 while files:
776 799 i = files.popleft()
777 800 patterns = matchmod.readpatternfile(i, self._ui.warn,
778 801 sourceinfo=True)
779 802 for pattern, lineno, line in patterns:
780 803 kind, p = matchmod._patsplit(pattern, 'glob')
781 804 if kind == "subinclude":
782 805 if p not in visited:
783 806 files.append(p)
784 807 continue
785 808 m = matchmod.match(self._root, '', [], [pattern],
786 809 warn=self._ui.warn)
787 810 if m(f):
788 811 return (i, lineno, line)
789 812 visited.add(i)
790 813 return (None, -1, "")
791 814
792 815 def _walkexplicit(self, match, subrepos):
793 816 '''Get stat data about the files explicitly specified by match.
794 817
795 818 Return a triple (results, dirsfound, dirsnotfound).
796 819 - results is a mapping from filename to stat result. It also contains
797 820 listings mapping subrepos and .hg to None.
798 821 - dirsfound is a list of files found to be directories.
799 822 - dirsnotfound is a list of files that the dirstate thinks are
800 823 directories and that were not found.'''
801 824
802 825 def badtype(mode):
803 826 kind = _('unknown')
804 827 if stat.S_ISCHR(mode):
805 828 kind = _('character device')
806 829 elif stat.S_ISBLK(mode):
807 830 kind = _('block device')
808 831 elif stat.S_ISFIFO(mode):
809 832 kind = _('fifo')
810 833 elif stat.S_ISSOCK(mode):
811 834 kind = _('socket')
812 835 elif stat.S_ISDIR(mode):
813 836 kind = _('directory')
814 837 return _('unsupported file type (type is %s)') % kind
815 838
816 839 matchedir = match.explicitdir
817 840 badfn = match.bad
818 841 dmap = self._map
819 842 lstat = os.lstat
820 843 getkind = stat.S_IFMT
821 844 dirkind = stat.S_IFDIR
822 845 regkind = stat.S_IFREG
823 846 lnkkind = stat.S_IFLNK
824 847 join = self._join
825 848 dirsfound = []
826 849 foundadd = dirsfound.append
827 850 dirsnotfound = []
828 851 notfoundadd = dirsnotfound.append
829 852
830 853 if not match.isexact() and self._checkcase:
831 854 normalize = self._normalize
832 855 else:
833 856 normalize = None
834 857
835 858 files = sorted(match.files())
836 859 subrepos.sort()
837 860 i, j = 0, 0
838 861 while i < len(files) and j < len(subrepos):
839 862 subpath = subrepos[j] + "/"
840 863 if files[i] < subpath:
841 864 i += 1
842 865 continue
843 866 while i < len(files) and files[i].startswith(subpath):
844 867 del files[i]
845 868 j += 1
846 869
847 870 if not files or '.' in files:
848 871 files = ['.']
849 872 results = dict.fromkeys(subrepos)
850 873 results['.hg'] = None
851 874
852 875 alldirs = None
853 876 for ff in files:
854 877 # constructing the foldmap is expensive, so don't do it for the
855 878 # common case where files is ['.']
856 879 if normalize and ff != '.':
857 880 nf = normalize(ff, False, True)
858 881 else:
859 882 nf = ff
860 883 if nf in results:
861 884 continue
862 885
863 886 try:
864 887 st = lstat(join(nf))
865 888 kind = getkind(st.st_mode)
866 889 if kind == dirkind:
867 890 if nf in dmap:
868 891 # file replaced by dir on disk but still in dirstate
869 892 results[nf] = None
870 893 if matchedir:
871 894 matchedir(nf)
872 895 foundadd((nf, ff))
873 896 elif kind == regkind or kind == lnkkind:
874 897 results[nf] = st
875 898 else:
876 899 badfn(ff, badtype(kind))
877 900 if nf in dmap:
878 901 results[nf] = None
879 902 except OSError as inst: # nf not found on disk - it is dirstate only
880 903 if nf in dmap: # does it exactly match a missing file?
881 904 results[nf] = None
882 905 else: # does it match a missing directory?
883 906 if alldirs is None:
884 907 alldirs = util.dirs(dmap)
885 908 if nf in alldirs:
886 909 if matchedir:
887 910 matchedir(nf)
888 911 notfoundadd(nf)
889 912 else:
890 913 badfn(ff, inst.strerror)
891 914
892 915 # Case insensitive filesystems cannot rely on lstat() failing to detect
893 916 # a case-only rename. Prune the stat object for any file that does not
894 917 # match the case in the filesystem, if there are multiple files that
895 918 # normalize to the same path.
896 919 if match.isexact() and self._checkcase:
897 920 normed = {}
898 921
899 922 for f, st in results.iteritems():
900 923 if st is None:
901 924 continue
902 925
903 926 nc = util.normcase(f)
904 927 paths = normed.get(nc)
905 928
906 929 if paths is None:
907 930 paths = set()
908 931 normed[nc] = paths
909 932
910 933 paths.add(f)
911 934
912 935 for norm, paths in normed.iteritems():
913 936 if len(paths) > 1:
914 937 for path in paths:
915 938 folded = self._discoverpath(path, norm, True, None,
916 939 self._dirfoldmap)
917 940 if path != folded:
918 941 results[path] = None
919 942
920 943 return results, dirsfound, dirsnotfound
921 944
922 945 def walk(self, match, subrepos, unknown, ignored, full=True):
923 946 '''
924 947 Walk recursively through the directory tree, finding all files
925 948 matched by match.
926 949
927 950 If full is False, maybe skip some known-clean files.
928 951
929 952 Return a dict mapping filename to stat-like object (either
930 953 mercurial.osutil.stat instance or return value of os.stat()).
931 954
932 955 '''
933 956 # full is a flag that extensions that hook into walk can use -- this
934 957 # implementation doesn't use it at all. This satisfies the contract
935 958 # because we only guarantee a "maybe".
936 959
937 960 if ignored:
938 961 ignore = util.never
939 962 dirignore = util.never
940 963 elif unknown:
941 964 ignore = self._ignore
942 965 dirignore = self._dirignore
943 966 else:
944 967 # if not unknown and not ignored, drop dir recursion and step 2
945 968 ignore = util.always
946 969 dirignore = util.always
947 970
948 971 matchfn = match.matchfn
949 972 matchalways = match.always()
950 973 matchtdir = match.traversedir
951 974 dmap = self._map
952 975 listdir = osutil.listdir
953 976 lstat = os.lstat
954 977 dirkind = stat.S_IFDIR
955 978 regkind = stat.S_IFREG
956 979 lnkkind = stat.S_IFLNK
957 980 join = self._join
958 981
959 982 exact = skipstep3 = False
960 983 if match.isexact(): # match.exact
961 984 exact = True
962 985 dirignore = util.always # skip step 2
963 986 elif match.prefix(): # match.match, no patterns
964 987 skipstep3 = True
965 988
966 989 if not exact and self._checkcase:
967 990 normalize = self._normalize
968 991 normalizefile = self._normalizefile
969 992 skipstep3 = False
970 993 else:
971 994 normalize = self._normalize
972 995 normalizefile = None
973 996
974 997 # step 1: find all explicit files
975 998 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
976 999
977 1000 skipstep3 = skipstep3 and not (work or dirsnotfound)
978 1001 work = [d for d in work if not dirignore(d[0])]
979 1002
980 1003 # step 2: visit subdirectories
981 1004 def traverse(work, alreadynormed):
982 1005 wadd = work.append
983 1006 while work:
984 1007 nd = work.pop()
985 1008 skip = None
986 1009 if nd == '.':
987 1010 nd = ''
988 1011 else:
989 1012 skip = '.hg'
990 1013 try:
991 1014 entries = listdir(join(nd), stat=True, skip=skip)
992 1015 except OSError as inst:
993 1016 if inst.errno in (errno.EACCES, errno.ENOENT):
994 1017 match.bad(self.pathto(nd), inst.strerror)
995 1018 continue
996 1019 raise
997 1020 for f, kind, st in entries:
998 1021 if normalizefile:
999 1022 # even though f might be a directory, we're only
1000 1023 # interested in comparing it to files currently in the
1001 1024 # dmap -- therefore normalizefile is enough
1002 1025 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1003 1026 True)
1004 1027 else:
1005 1028 nf = nd and (nd + "/" + f) or f
1006 1029 if nf not in results:
1007 1030 if kind == dirkind:
1008 1031 if not ignore(nf):
1009 1032 if matchtdir:
1010 1033 matchtdir(nf)
1011 1034 wadd(nf)
1012 1035 if nf in dmap and (matchalways or matchfn(nf)):
1013 1036 results[nf] = None
1014 1037 elif kind == regkind or kind == lnkkind:
1015 1038 if nf in dmap:
1016 1039 if matchalways or matchfn(nf):
1017 1040 results[nf] = st
1018 1041 elif ((matchalways or matchfn(nf))
1019 1042 and not ignore(nf)):
1020 1043 # unknown file -- normalize if necessary
1021 1044 if not alreadynormed:
1022 1045 nf = normalize(nf, False, True)
1023 1046 results[nf] = st
1024 1047 elif nf in dmap and (matchalways or matchfn(nf)):
1025 1048 results[nf] = None
1026 1049
1027 1050 for nd, d in work:
1028 1051 # alreadynormed means that processwork doesn't have to do any
1029 1052 # expensive directory normalization
1030 1053 alreadynormed = not normalize or nd == d
1031 1054 traverse([d], alreadynormed)
1032 1055
1033 1056 for s in subrepos:
1034 1057 del results[s]
1035 1058 del results['.hg']
1036 1059
1037 1060 # step 3: visit remaining files from dmap
1038 1061 if not skipstep3 and not exact:
1039 1062 # If a dmap file is not in results yet, it was either
1040 1063 # a) not matching matchfn b) ignored, c) missing, or d) under a
1041 1064 # symlink directory.
1042 1065 if not results and matchalways:
1043 1066 visit = dmap.keys()
1044 1067 else:
1045 1068 visit = [f for f in dmap if f not in results and matchfn(f)]
1046 1069 visit.sort()
1047 1070
1048 1071 if unknown:
1049 1072 # unknown == True means we walked all dirs under the roots
1050 1073 # that wasn't ignored, and everything that matched was stat'ed
1051 1074 # and is already in results.
1052 1075 # The rest must thus be ignored or under a symlink.
1053 1076 audit_path = pathutil.pathauditor(self._root)
1054 1077
1055 1078 for nf in iter(visit):
1056 1079 # If a stat for the same file was already added with a
1057 1080 # different case, don't add one for this, since that would
1058 1081 # make it appear as if the file exists under both names
1059 1082 # on disk.
1060 1083 if (normalizefile and
1061 1084 normalizefile(nf, True, True) in results):
1062 1085 results[nf] = None
1063 1086 # Report ignored items in the dmap as long as they are not
1064 1087 # under a symlink directory.
1065 1088 elif audit_path.check(nf):
1066 1089 try:
1067 1090 results[nf] = lstat(join(nf))
1068 1091 # file was just ignored, no links, and exists
1069 1092 except OSError:
1070 1093 # file doesn't exist
1071 1094 results[nf] = None
1072 1095 else:
1073 1096 # It's either missing or under a symlink directory
1074 1097 # which we in this case report as missing
1075 1098 results[nf] = None
1076 1099 else:
1077 1100 # We may not have walked the full directory tree above,
1078 1101 # so stat and check everything we missed.
1079 1102 nf = iter(visit).next
1080 1103 for st in util.statfiles([join(i) for i in visit]):
1081 1104 results[nf()] = st
1082 1105 return results
1083 1106
1084 1107 def status(self, match, subrepos, ignored, clean, unknown):
1085 1108 '''Determine the status of the working copy relative to the
1086 1109 dirstate and return a pair of (unsure, status), where status is of type
1087 1110 scmutil.status and:
1088 1111
1089 1112 unsure:
1090 1113 files that might have been modified since the dirstate was
1091 1114 written, but need to be read to be sure (size is the same
1092 1115 but mtime differs)
1093 1116 status.modified:
1094 1117 files that have definitely been modified since the dirstate
1095 1118 was written (different size or mode)
1096 1119 status.clean:
1097 1120 files that have definitely not been modified since the
1098 1121 dirstate was written
1099 1122 '''
1100 1123 listignored, listclean, listunknown = ignored, clean, unknown
1101 1124 lookup, modified, added, unknown, ignored = [], [], [], [], []
1102 1125 removed, deleted, clean = [], [], []
1103 1126
1104 1127 dmap = self._map
1105 1128 ladd = lookup.append # aka "unsure"
1106 1129 madd = modified.append
1107 1130 aadd = added.append
1108 1131 uadd = unknown.append
1109 1132 iadd = ignored.append
1110 1133 radd = removed.append
1111 1134 dadd = deleted.append
1112 1135 cadd = clean.append
1113 1136 mexact = match.exact
1114 1137 dirignore = self._dirignore
1115 1138 checkexec = self._checkexec
1116 1139 copymap = self._copymap
1117 1140 lastnormaltime = self._lastnormaltime
1118 1141
1119 1142 # We need to do full walks when either
1120 1143 # - we're listing all clean files, or
1121 1144 # - match.traversedir does something, because match.traversedir should
1122 1145 # be called for every dir in the working dir
1123 1146 full = listclean or match.traversedir is not None
1124 1147 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1125 1148 full=full).iteritems():
1126 1149 if fn not in dmap:
1127 1150 if (listignored or mexact(fn)) and dirignore(fn):
1128 1151 if listignored:
1129 1152 iadd(fn)
1130 1153 else:
1131 1154 uadd(fn)
1132 1155 continue
1133 1156
1134 1157 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1135 1158 # written like that for performance reasons. dmap[fn] is not a
1136 1159 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1137 1160 # opcode has fast paths when the value to be unpacked is a tuple or
1138 1161 # a list, but falls back to creating a full-fledged iterator in
1139 1162 # general. That is much slower than simply accessing and storing the
1140 1163 # tuple members one by one.
1141 1164 t = dmap[fn]
1142 1165 state = t[0]
1143 1166 mode = t[1]
1144 1167 size = t[2]
1145 1168 time = t[3]
1146 1169
1147 1170 if not st and state in "nma":
1148 1171 dadd(fn)
1149 1172 elif state == 'n':
1150 1173 if (size >= 0 and
1151 1174 ((size != st.st_size and size != st.st_size & _rangemask)
1152 1175 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1153 1176 or size == -2 # other parent
1154 1177 or fn in copymap):
1155 1178 madd(fn)
1156 1179 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1157 1180 ladd(fn)
1158 1181 elif st.st_mtime == lastnormaltime:
1159 1182 # fn may have just been marked as normal and it may have
1160 1183 # changed in the same second without changing its size.
1161 1184 # This can happen if we quickly do multiple commits.
1162 1185 # Force lookup, so we don't miss such a racy file change.
1163 1186 ladd(fn)
1164 1187 elif listclean:
1165 1188 cadd(fn)
1166 1189 elif state == 'm':
1167 1190 madd(fn)
1168 1191 elif state == 'a':
1169 1192 aadd(fn)
1170 1193 elif state == 'r':
1171 1194 radd(fn)
1172 1195
1173 1196 return (lookup, scmutil.status(modified, added, removed, deleted,
1174 1197 unknown, ignored, clean))
1175 1198
1176 1199 def matches(self, match):
1177 1200 '''
1178 1201 return files in the dirstate (in whatever state) filtered by match
1179 1202 '''
1180 1203 dmap = self._map
1181 1204 if match.always():
1182 1205 return dmap.keys()
1183 1206 files = match.files()
1184 1207 if match.isexact():
1185 1208 # fast path -- filter the other way around, since typically files is
1186 1209 # much smaller than dmap
1187 1210 return [f for f in files if f in dmap]
1188 1211 if match.prefix() and all(fn in dmap for fn in files):
1189 1212 # fast path -- all the values are known to be files, so just return
1190 1213 # that
1191 1214 return list(files)
1192 1215 return [f for f in dmap if match(f)]
1193 1216
1194 1217 def _actualfilename(self, tr):
1195 1218 if tr:
1196 1219 return self._pendingfilename
1197 1220 else:
1198 1221 return self._filename
1199 1222
1200 1223 def savebackup(self, tr, suffix='', prefix=''):
1201 1224 '''Save current dirstate into backup file with suffix'''
1202 1225 assert len(suffix) > 0 or len(prefix) > 0
1203 1226 filename = self._actualfilename(tr)
1204 1227
1205 1228 # use '_writedirstate' instead of 'write' to write changes certainly,
1206 1229 # because the latter omits writing out if transaction is running.
1207 1230 # output file will be used to create backup of dirstate at this point.
1208 1231 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1209 1232 checkambig=True))
1210 1233
1211 1234 if tr:
1212 1235 # ensure that subsequent tr.writepending returns True for
1213 1236 # changes written out above, even if dirstate is never
1214 1237 # changed after this
1215 1238 tr.addfilegenerator('dirstate', (self._filename,),
1216 1239 self._writedirstate, location='plain')
1217 1240
1218 1241 # ensure that pending file written above is unlinked at
1219 1242 # failure, even if tr.writepending isn't invoked until the
1220 1243 # end of this transaction
1221 1244 tr.registertmp(filename, location='plain')
1222 1245
1223 1246 self._opener.write(prefix + self._filename + suffix,
1224 1247 self._opener.tryread(filename))
1225 1248
1226 1249 def restorebackup(self, tr, suffix='', prefix=''):
1227 1250 '''Restore dirstate by backup file with suffix'''
1228 1251 assert len(suffix) > 0 or len(prefix) > 0
1229 1252 # this "invalidate()" prevents "wlock.release()" from writing
1230 1253 # changes of dirstate out after restoring from backup file
1231 1254 self.invalidate()
1232 1255 filename = self._actualfilename(tr)
1233 1256 # using self._filename to avoid having "pending" in the backup filename
1234 1257 self._opener.rename(prefix + self._filename + suffix, filename,
1235 1258 checkambig=True)
1236 1259
1237 1260 def clearbackup(self, tr, suffix='', prefix=''):
1238 1261 '''Clear backup file with suffix'''
1239 1262 assert len(suffix) > 0 or len(prefix) > 0
1240 1263 # using self._filename to avoid having "pending" in the backup filename
1241 1264 self._opener.unlink(prefix + self._filename + suffix)