##// END OF EJS Templates
dirstate: fix debug.dirstate.delaywrite to use the new "now" after sleeping...
Mads Kiilerich -
r30224:ad56071b stable
parent child Browse files
Show More
@@ -1,1258 +1,1259 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 match as matchmod,
21 21 osutil,
22 22 parsers,
23 23 pathutil,
24 24 scmutil,
25 25 util,
26 26 )
27 27
28 28 propertycache = util.propertycache
29 29 filecache = scmutil.filecache
30 30 _rangemask = 0x7fffffff
31 31
32 32 dirstatetuple = parsers.dirstatetuple
33 33
class repocache(filecache):
    """filecache for files in .hg/

    Cache keys are resolved through the repo's '_opener' vfs, so the
    watched file lives inside the .hg directory.
    """
    def join(self, obj, fname):
        return obj._opener.join(fname)
38 38
class rootcache(filecache):
    """filecache for files in the repository root

    Cache keys are resolved relative to the working directory root
    (e.g. '.hgignore').
    """
    def join(self, obj, fname):
        return obj._join(fname)
43 43
44 44 def _getfsnow(vfs):
45 45 '''Get "now" timestamp on filesystem'''
46 46 tmpfd, tmpname = vfs.mkstemp()
47 47 try:
48 48 return os.fstat(tmpfd).st_mtime
49 49 finally:
50 50 os.close(tmpfd)
51 51 vfs.unlink(tmpname)
52 52
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap

    Prefers the C implementation in the parsers module when present and
    falls back to a pure Python scan: an entry is nonnormal when its state
    is not 'n' or its mtime is unset (-1).
    '''
    try:
        return parsers.nonnormalentries(dmap)
    except AttributeError:
        nonnormal = set()
        for fname, e in dmap.iteritems():
            if e[0] != 'n' or e[3] == -1:
                nonnormal.add(fname)
        return nonnormal
60 60
61 61 def _trypending(root, vfs, filename):
62 62 '''Open file to be read according to HG_PENDING environment variable
63 63
64 64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 65 is equal to 'root'.
66 66
67 67 This returns '(fp, is_pending_opened)' tuple.
68 68 '''
69 69 if root == os.environ.get('HG_PENDING'):
70 70 try:
71 71 return (vfs('%s.pending' % filename), True)
72 72 except IOError as inst:
73 73 if inst.errno != errno.ENOENT:
74 74 raise
75 75 return (vfs(filename), False)
76 76
77 77 class dirstate(object):
78 78
    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # internal config: ui.forcecwd
        forcecwd = ui.config('ui', 'forcecwd')
        if forcecwd:
            # pre-seeds the lazily computed _cwd propertycache
            self._cwd = forcecwd
        self._dirty = False            # any unwritten in-memory change?
        self._dirtypl = False          # parents changed in memory?
        self._lastnormaltime = 0       # mtime of most recent normal()ed file
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0        # nesting depth of beginparentchange()
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}   # category -> parents-changed callback
        self._origpl = None            # parents before the current change

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
109 109
    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.
        '''
        # calls may nest; the counter tracks the nesting depth
        self._parentwriters += 1

    def endparentchange(self):
        '''Marks the end of a set of changes that involve changing the
        dirstate parents. Once all parent changes have been marked done,
        the wlock will be free to write the dirstate on release.
        '''
        if self._parentwriters > 0:
            self._parentwriters -= 1

    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0
132 132
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        # _read() assigns self._map directly, replacing this propertycache
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        # copy destination -> copy source map, populated by _read()
        self._read()
        return self._copymap

    @propertycache
    def _nonnormalset(self):
        # set of files whose entries are not clean 'n' with a known mtime
        return nonnormalentries(self._map)
148 148
    @propertycache
    def _filefoldmap(self):
        # map of normcased filename -> filename as stored in the dirstate,
        # for files not marked removed; used on case-folding filesystems
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # fast C implementation when the parsers module provides it
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @propertycache
    def _dirfoldmap(self):
        # map of normcased directory name -> directory name as tracked
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f
174 174
    @repocache('branch')
    def _branch(self):
        # current branch name from .hg/branch; a missing or empty file
        # means the default branch
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return "default"

    @propertycache
    def _pl(self):
        # working directory parents: pair of 20-byte nodeids stored in the
        # first 40 bytes of the dirstate file
        try:
            fp = self._opendirstatefile()
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                # short read: the file was truncated mid-write
                raise error.Abort(_('working directory state appears damaged!'))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]
199 199
    @propertycache
    def _dirs(self):
        # multiset of directories containing tracked (non-'r') files
        return util.dirs(self._map, 'r')

    def dirs(self):
        return self._dirs

    @rootcache('.hgignore')
    def _ignore(self):
        # matcher built from all configured ignore files; matches nothing
        # when no ignore file exists
        files = self._ignorefiles()
        if not files:
            return util.never

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
215 215
    @propertycache
    def _slash(self):
        # should paths be displayed with '/' even though os.sep differs?
        return self._ui.configbool('ui', 'slash') and os.sep != '/'

    @propertycache
    def _checklink(self):
        # does the working directory's filesystem support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the working directory's filesystem support the exec bit?
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        # True on case-insensitive (case-folding) filesystems
        return not util.fscasesensitive(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
236 236
    def flagfunc(self, buildfallback):
        # Return a function mapping a tracked path to its flags: 'l' for a
        # symlink, 'x' for an executable, '' otherwise. When the filesystem
        # cannot express a flag, the function built by buildfallback() is
        # consulted instead.
        if self._checklink and self._checkexec:
            # filesystem supports both flags: read them from disk directly
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        # buildfallback() is only invoked when at least one flag is
        # unsupported, as it may be expensive
        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # neither flag supported: rely entirely on the fallback
            return fallback
270 270
    @propertycache
    def _cwd(self):
        # may be pre-seeded from ui.forcecwd in __init__
        return os.getcwd()

    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        # Return repo-relative path 'f' expressed relative to 'cwd',
        # honoring the ui.slash display preference.
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
302 302
    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        # iterate tracked filenames in sorted order
        for x in sorted(self._map):
            yield x

    def iteritems(self):
        return self._map.iteritems()

    def parents(self):
        # both parents, run through the validation callback
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
336 336
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the original parents for the change callbacks
            self._origpl = self._pl
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: merge-only markers no longer make sense
            for f, s in self._map.iteritems():
                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies
369 369
    def setbranch(self, branch):
        # Persist the branch name (internal encoding) to .hg/branch.
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # discard the partially written temp file before re-raising
            f.discard()
            raise
385 385
386 386 def _opendirstatefile(self):
387 387 fp, mode = _trypending(self._root, self._opener, self._filename)
388 388 if self._pendingmode is not None and self._pendingmode != mode:
389 389 fp.close()
390 390 raise error.Abort(_('working directory state may be '
391 391 'changed parallelly'))
392 392 self._pendingmode = mode
393 393 return fp
394 394
    def _read(self):
        # Parse the on-disk dirstate into self._map and self._copymap,
        # and pick up the parents unless they were already changed in
        # memory (self._dirtypl).
        self._map = {}
        self._copymap = {}
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # no dirstate file: empty working directory state
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p
440 440
441 441 def invalidate(self):
442 442 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
443 443 "_pl", "_dirs", "_ignore", "_nonnormalset"):
444 444 if a in self.__dict__:
445 445 delattr(self, a)
446 446 self._lastnormaltime = 0
447 447 self._dirty = False
448 448 self._parentwriters = 0
449 449 self._origpl = None
450 450
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # a self-copy is meaningless; ignore it
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]

    def copied(self, file):
        # return the copy source of 'file', or None if it is not a copy
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap
466 466
    def _droppath(self, f):
        # Keep the _dirs multiset and the file fold map in sync when 'f'
        # stops being tracked (only if those caches were materialized).
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]

    def _addpath(self, f, state, mode, size, mtime):
        # Insert or update the entry for 'f', validating that it does not
        # collide with a tracked directory or shadow a tracked file.
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            # not clean-with-known-mtime: needs a second look in status
            self._nonnormalset.add(f)
495 495
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # mask size/mtime to 31 bits to fit the dirstate storage format
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
511 511
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    # merge()/otherparent() cleared the copy record; restore
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                # already in a merge-related state; leave untouched
                return
        # size -1 and mtime -1 force a content check on next status
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
535 535
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like: file existed in p1 too
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like: file comes only from p2 (size -2 marks otherparent)
            self._addpath(f, 'n', 0, -2, -1)

        if f in self._copymap:
            del self._copymap[f]
550 550
    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._nonnormalset.add(f)
        if size == 0 and f in self._copymap:
            # keep the copy record only when a merge state was backed up
            del self._copymap[f]
573 573
    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == nullid:
            # not in a merge: behaves like a plain normallookup
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]
            if f in self._nonnormalset:
                self._nonnormalset.remove(f)
            if f in self._copymap:
                del self._copymap[f]
590 590
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        # Determine the canonical-case spelling of 'path' by consulting the
        # filesystem, caching the result in 'storemap' (a fold map).
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist
            storemap[normed] = folded

        return folded
616 616
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        # Normalize the case of a *file* path, consulting only the file
        # fold map (directories are not matched).
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # caller got the name from disk: trust its spelling
                folded = path
            else:
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._filefoldmap)
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        # Normalize the case of a path that may name a file or a directory;
        # files take precedence over directories with the same normed name.
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            folded = self._dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._dirfoldmap)
        return folded
642 642
    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        # case-sensitive filesystem: nothing to normalize
        return path
664 664
    def clear(self):
        # Reset to an empty dirstate (no files, null parents) and mark
        # everything dirty so the empty state gets written out.
        self._map = {}
        self._nonnormalset = set()
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        # Rebuild the dirstate against 'parent': changed files present in
        # 'allfiles' become normallookup entries, others are dropped.
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # preserve across clear(): rebuilding does not touch file content
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._pl = (parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
693 693
    def write(self, tr):
        # Write the dirstate to disk, or, inside a transaction, register a
        # delayed write via the transaction's file generator mechanism.
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f, e in dmap.iteritems():
                if e[0] == 'n' and e[3] == now:
                    # entry mtime equals "now": mark mtime unknown so the
                    # next status does a full content comparison
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
724 724
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
735 735
    def _writedirstate(self, st):
        # Serialize the dirstate into the open file-like 'st', firing
        # parents-changed callbacks first and honoring the
        # debug.dirstate.delaywrite knob used by the test suite.
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        self._nonnormalset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
767 768
768 769 def _dirignore(self, f):
769 770 if f == '.':
770 771 return False
771 772 if self._ignore(f):
772 773 return True
773 774 for p in util.finddirs(f):
774 775 if self._ignore(p):
775 776 return True
776 777 return False
777 778
778 779 def _ignorefiles(self):
779 780 files = []
780 781 if os.path.exists(self._join('.hgignore')):
781 782 files.append(self._join('.hgignore'))
782 783 for name, path in self._ui.configitems("ui"):
783 784 if name == 'ignore' or name.startswith('ignore.'):
784 785 # we need to use os.path.join here rather than self._join
785 786 # because path is arbitrary and user-specified
786 787 files.append(os.path.join(self._rootdir, util.expandpath(path)))
787 788 return files
788 789
789 790 def _ignorefileandline(self, f):
790 791 files = collections.deque(self._ignorefiles())
791 792 visited = set()
792 793 while files:
793 794 i = files.popleft()
794 795 patterns = matchmod.readpatternfile(i, self._ui.warn,
795 796 sourceinfo=True)
796 797 for pattern, lineno, line in patterns:
797 798 kind, p = matchmod._patsplit(pattern, 'glob')
798 799 if kind == "subinclude":
799 800 if p not in visited:
800 801 files.append(p)
801 802 continue
802 803 m = matchmod.match(self._root, '', [], [pattern],
803 804 warn=self._ui.warn)
804 805 if m(f):
805 806 return (i, lineno, line)
806 807 visited.add(i)
807 808 return (None, -1, "")
808 809
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable description of an unsupported file type
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        # bind frequently used callables to locals for the hot loop below
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop requested files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        # lazily build the directory set; only needed when
                        # something was not found on disk
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
938 939
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick ignore predicates according to what the caller wants reported:
        # listing ignored files means nothing counts as "ignored" here, while
        # listing neither ignored nor unknown means everything outside the
        # dirstate can be skipped.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist frequently used objects into locals -- this method is a hot
        # path executed for every working-directory walk
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = osutil.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            # case-insensitive filesystem: names found on disk must be folded
            # to the case stored in the dirstate before comparison
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal; 'work' is a stack of
            # directory names still to visit
            wadd = work.append
            while work:
                nd = work.pop()
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    # never descend into the repository's own metadata
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        # report unreadable or vanished directories instead
                        # of aborting the whole walk
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked, but neither regular file, symlink nor
                            # directory on disk: report without a stat
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                # (Python 2 iterator protocol: nf() yields filenames from
                # 'visit' in step with the results of util.statfiles.)
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
1100 1101
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        # rebind the list-selection flags; the parameter names are reused
        # below as the result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # bound-method and attribute hoisting: this loop runs once per file
        # in the working directory, so local lookups matter
        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # file exists on disk but is not tracked: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            # st is falsy when walk() found no stat data: the tracked file
            # is gone from disk
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # 'normal' entry: compare recorded size/mode/mtime against
                # the on-disk stat (values may have been truncated to 31 bits
                # with _rangemask when the dirstate was written)
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1192 1193
1193 1194 def matches(self, match):
1194 1195 '''
1195 1196 return files in the dirstate (in whatever state) filtered by match
1196 1197 '''
1197 1198 dmap = self._map
1198 1199 if match.always():
1199 1200 return dmap.keys()
1200 1201 files = match.files()
1201 1202 if match.isexact():
1202 1203 # fast path -- filter the other way around, since typically files is
1203 1204 # much smaller than dmap
1204 1205 return [f for f in files if f in dmap]
1205 1206 if match.prefix() and all(fn in dmap for fn in files):
1206 1207 # fast path -- all the values are known to be files, so just return
1207 1208 # that
1208 1209 return list(files)
1209 1210 return [f for f in dmap if match(f)]
1210 1211
1211 1212 def _actualfilename(self, tr):
1212 1213 if tr:
1213 1214 return self._pendingfilename
1214 1215 else:
1215 1216 return self._filename
1216 1217
    def savebackup(self, tr, suffix='', prefix=''):
        '''Save current dirstate into backup file with suffix

        The backup is written as "<prefix><dirstate filename><suffix>";
        at least one of prefix/suffix must be non-empty so the backup
        cannot clobber the live dirstate file.
        '''
        assert len(suffix) > 0 or len(prefix) > 0
        filename = self._actualfilename(tr)

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                         checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        # copy the (possibly pending) dirstate just written into the backup
        self._opener.write(prefix + self._filename + suffix,
                           self._opener.tryread(filename))
1242 1243
    def restorebackup(self, tr, suffix='', prefix=''):
        '''Restore dirstate by backup file with suffix

        The backup must have been created by an earlier savebackup()
        call using the same prefix/suffix.
        '''
        assert len(suffix) > 0 or len(prefix) > 0
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        # using self._filename to avoid having "pending" in the backup filename
        self._opener.rename(prefix + self._filename + suffix, filename,
                            checkambig=True)
1253 1254
1254 1255 def clearbackup(self, tr, suffix='', prefix=''):
1255 1256 '''Clear backup file with suffix'''
1256 1257 assert len(suffix) > 0 or len(prefix) > 0
1257 1258 # using self._filename to avoid having "pending" in the backup filename
1258 1259 self._opener.unlink(prefix + self._filename + suffix)
General Comments 0
You need to be logged in to leave comments. Login now