dirstate: remove file from copymap on drop...
Mateusz Kwapich
r29247:3e438497 default
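The functional change in this commit is small: drop() now discards the file's entry from _copymap along with its dirstate entry, so a stale copy record no longer survives after the copy destination is dropped (see the two added lines in drop() in the listing below). A minimal sketch of the behavior, using a hypothetical stand-in class rather than the real dirstate object:

    # Illustrative sketch only -- MiniDirstate is a made-up stand-in,
    # not mercurial's dirstate class.
    class MiniDirstate(object):
        def __init__(self):
            self._map = {}      # filename -> dirstate entry
            self._copymap = {}  # copy destination -> copy source

        def add(self, f):
            self._map[f] = ('a', 0, -1, -1)

        def copy(self, source, dest):
            self._copymap[dest] = source

        def drop(self, f):
            if f in self._map:
                del self._map[f]
                # the behavior added by this commit: forget the copy record too
                if f in self._copymap:
                    del self._copymap[f]

    ds = MiniDirstate()
    ds.add('b')
    ds.copy('a', 'b')              # 'b' recorded as a copy of 'a'
    ds.drop('b')
    assert 'b' not in ds._copymap  # previously the stale record lingered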
@@ -1,1245 +1,1247
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 match as matchmod,
21 21 osutil,
22 22 parsers,
23 23 pathutil,
24 24 scmutil,
25 25 util,
26 26 )
27 27
28 28 propertycache = util.propertycache
29 29 filecache = scmutil.filecache
30 30 _rangemask = 0x7fffffff
31 31
32 32 dirstatetuple = parsers.dirstatetuple
33 33
34 34 class repocache(filecache):
35 35 """filecache for files in .hg/"""
36 36 def join(self, obj, fname):
37 37 return obj._opener.join(fname)
38 38
39 39 class rootcache(filecache):
40 40 """filecache for files in the repository root"""
41 41 def join(self, obj, fname):
42 42 return obj._join(fname)
43 43
44 44 def _getfsnow(vfs):
45 45 '''Get "now" timestamp on filesystem'''
46 46 tmpfd, tmpname = vfs.mkstemp()
47 47 try:
48 48 return os.fstat(tmpfd).st_mtime
49 49 finally:
50 50 os.close(tmpfd)
51 51 vfs.unlink(tmpname)
52 52
53 53 def nonnormalentries(dmap):
54 54 '''Compute the nonnormal dirstate entries from the dmap'''
55 55 try:
56 56 return parsers.nonnormalentries(dmap)
57 57 except AttributeError:
58 58 return set(fname for fname, e in dmap.iteritems()
59 59 if e[0] != 'n' or e[3] == -1)
60 60
61 61 def _trypending(root, vfs, filename):
62 62 '''Open file to be read according to HG_PENDING environment variable
63 63
64 64 This opens '.pending' of specified 'filename' only when HG_PENDING
65 65 is equal to 'root'.
66 66
67 67 This returns '(fp, is_pending_opened)' tuple.
68 68 '''
69 69 if root == os.environ.get('HG_PENDING'):
70 70 try:
71 71 return (vfs('%s.pending' % filename), True)
72 72 except IOError as inst:
73 73 if inst.errno != errno.ENOENT:
74 74 raise
75 75 return (vfs(filename), False)
76 76
77 77 _token = object()
78 78
79 79 class dirstate(object):
80 80
81 81 def __init__(self, opener, ui, root, validate):
82 82 '''Create a new dirstate object.
83 83
84 84 opener is an open()-like callable that can be used to open the
85 85 dirstate file; root is the root of the directory tracked by
86 86 the dirstate.
87 87 '''
88 88 self._opener = opener
89 89 self._validate = validate
90 90 self._root = root
91 91 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
92 92 # UNC path pointing to root share (issue4557)
93 93 self._rootdir = pathutil.normasprefix(root)
94 94 # internal config: ui.forcecwd
95 95 forcecwd = ui.config('ui', 'forcecwd')
96 96 if forcecwd:
97 97 self._cwd = forcecwd
98 98 self._dirty = False
99 99 self._dirtypl = False
100 100 self._lastnormaltime = 0
101 101 self._ui = ui
102 102 self._filecache = {}
103 103 self._parentwriters = 0
104 104 self._filename = 'dirstate'
105 105 self._pendingfilename = '%s.pending' % self._filename
106 106
107 107 # for consistent view between _pl() and _read() invocations
108 108 self._pendingmode = None
109 109
110 110 def beginparentchange(self):
111 111 '''Marks the beginning of a set of changes that involve changing
112 112 the dirstate parents. If there is an exception during this time,
113 113 the dirstate will not be written when the wlock is released. This
114 114 prevents writing an incoherent dirstate where the parent doesn't
115 115 match the contents.
116 116 '''
117 117 self._parentwriters += 1
118 118
119 119 def endparentchange(self):
120 120 '''Marks the end of a set of changes that involve changing the
121 121 dirstate parents. Once all parent changes have been marked done,
122 122 the wlock will be free to write the dirstate on release.
123 123 '''
124 124 if self._parentwriters > 0:
125 125 self._parentwriters -= 1
126 126
127 127 def pendingparentchange(self):
128 128 '''Returns true if the dirstate is in the middle of a set of changes
129 129 that modify the dirstate parent.
130 130 '''
131 131 return self._parentwriters > 0
132 132
133 133 @propertycache
134 134 def _map(self):
135 135 '''Return the dirstate contents as a map from filename to
136 136 (state, mode, size, time).'''
137 137 self._read()
138 138 return self._map
139 139
140 140 @propertycache
141 141 def _copymap(self):
142 142 self._read()
143 143 return self._copymap
144 144
145 145 @propertycache
146 146 def _nonnormalset(self):
147 147 return nonnormalentries(self._map)
148 148
149 149 @propertycache
150 150 def _filefoldmap(self):
151 151 try:
152 152 makefilefoldmap = parsers.make_file_foldmap
153 153 except AttributeError:
154 154 pass
155 155 else:
156 156 return makefilefoldmap(self._map, util.normcasespec,
157 157 util.normcasefallback)
158 158
159 159 f = {}
160 160 normcase = util.normcase
161 161 for name, s in self._map.iteritems():
162 162 if s[0] != 'r':
163 163 f[normcase(name)] = name
164 164 f['.'] = '.' # prevents useless util.fspath() invocation
165 165 return f
166 166
167 167 @propertycache
168 168 def _dirfoldmap(self):
169 169 f = {}
170 170 normcase = util.normcase
171 171 for name in self._dirs:
172 172 f[normcase(name)] = name
173 173 return f
174 174
175 175 @repocache('branch')
176 176 def _branch(self):
177 177 try:
178 178 return self._opener.read("branch").strip() or "default"
179 179 except IOError as inst:
180 180 if inst.errno != errno.ENOENT:
181 181 raise
182 182 return "default"
183 183
184 184 @propertycache
185 185 def _pl(self):
186 186 try:
187 187 fp = self._opendirstatefile()
188 188 st = fp.read(40)
189 189 fp.close()
190 190 l = len(st)
191 191 if l == 40:
192 192 return st[:20], st[20:40]
193 193 elif l > 0 and l < 40:
194 194 raise error.Abort(_('working directory state appears damaged!'))
195 195 except IOError as err:
196 196 if err.errno != errno.ENOENT:
197 197 raise
198 198 return [nullid, nullid]
199 199
200 200 @propertycache
201 201 def _dirs(self):
202 202 return util.dirs(self._map, 'r')
203 203
204 204 def dirs(self):
205 205 return self._dirs
206 206
207 207 @rootcache('.hgignore')
208 208 def _ignore(self):
209 209 files = self._ignorefiles()
210 210 if not files:
211 211 return util.never
212 212
213 213 pats = ['include:%s' % f for f in files]
214 214 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
215 215
216 216 @propertycache
217 217 def _slash(self):
218 218 return self._ui.configbool('ui', 'slash') and os.sep != '/'
219 219
220 220 @propertycache
221 221 def _checklink(self):
222 222 return util.checklink(self._root)
223 223
224 224 @propertycache
225 225 def _checkexec(self):
226 226 return util.checkexec(self._root)
227 227
228 228 @propertycache
229 229 def _checkcase(self):
230 230 return not util.checkcase(self._join('.hg'))
231 231
232 232 def _join(self, f):
233 233 # much faster than os.path.join()
234 234 # it's safe because f is always a relative path
235 235 return self._rootdir + f
236 236
237 237 def flagfunc(self, buildfallback):
238 238 if self._checklink and self._checkexec:
239 239 def f(x):
240 240 try:
241 241 st = os.lstat(self._join(x))
242 242 if util.statislink(st):
243 243 return 'l'
244 244 if util.statisexec(st):
245 245 return 'x'
246 246 except OSError:
247 247 pass
248 248 return ''
249 249 return f
250 250
251 251 fallback = buildfallback()
252 252 if self._checklink:
253 253 def f(x):
254 254 if os.path.islink(self._join(x)):
255 255 return 'l'
256 256 if 'x' in fallback(x):
257 257 return 'x'
258 258 return ''
259 259 return f
260 260 if self._checkexec:
261 261 def f(x):
262 262 if 'l' in fallback(x):
263 263 return 'l'
264 264 if util.isexec(self._join(x)):
265 265 return 'x'
266 266 return ''
267 267 return f
268 268 else:
269 269 return fallback
270 270
271 271 @propertycache
272 272 def _cwd(self):
273 273 return os.getcwd()
274 274
275 275 def getcwd(self):
276 276 '''Return the path from which a canonical path is calculated.
277 277
278 278 This path should be used to resolve file patterns or to convert
279 279 canonical paths back to file paths for display. It shouldn't be
280 280 used to get real file paths. Use vfs functions instead.
281 281 '''
282 282 cwd = self._cwd
283 283 if cwd == self._root:
284 284 return ''
285 285 # self._root ends with a path separator if self._root is '/' or 'C:\'
286 286 rootsep = self._root
287 287 if not util.endswithsep(rootsep):
288 288 rootsep += os.sep
289 289 if cwd.startswith(rootsep):
290 290 return cwd[len(rootsep):]
291 291 else:
292 292 # we're outside the repo. return an absolute path.
293 293 return cwd
294 294
295 295 def pathto(self, f, cwd=None):
296 296 if cwd is None:
297 297 cwd = self.getcwd()
298 298 path = util.pathto(self._root, cwd, f)
299 299 if self._slash:
300 300 return util.pconvert(path)
301 301 return path
302 302
303 303 def __getitem__(self, key):
304 304 '''Return the current state of key (a filename) in the dirstate.
305 305
306 306 States are:
307 307 n normal
308 308 m needs merging
309 309 r marked for removal
310 310 a marked for addition
311 311 ? not tracked
312 312 '''
313 313 return self._map.get(key, ("?",))[0]
314 314
315 315 def __contains__(self, key):
316 316 return key in self._map
317 317
318 318 def __iter__(self):
319 319 for x in sorted(self._map):
320 320 yield x
321 321
322 322 def iteritems(self):
323 323 return self._map.iteritems()
324 324
325 325 def parents(self):
326 326 return [self._validate(p) for p in self._pl]
327 327
328 328 def p1(self):
329 329 return self._validate(self._pl[0])
330 330
331 331 def p2(self):
332 332 return self._validate(self._pl[1])
333 333
334 334 def branch(self):
335 335 return encoding.tolocal(self._branch)
336 336
337 337 def setparents(self, p1, p2=nullid):
338 338 """Set dirstate parents to p1 and p2.
339 339
340 340 When moving from two parents to one, 'm' merged entries are
341 341 adjusted to normal and previous copy records are discarded and
342 342 returned by the call.
343 343
344 344 See localrepo.setparents()
345 345 """
346 346 if self._parentwriters == 0:
347 347 raise ValueError("cannot set dirstate parent without "
348 348 "calling dirstate.beginparentchange")
349 349
350 350 self._dirty = self._dirtypl = True
351 351 oldp2 = self._pl[1]
352 352 self._pl = p1, p2
353 353 copies = {}
354 354 if oldp2 != nullid and p2 == nullid:
355 355 for f, s in self._map.iteritems():
356 356 # Discard 'm' markers when moving away from a merge state
357 357 if s[0] == 'm':
358 358 if f in self._copymap:
359 359 copies[f] = self._copymap[f]
360 360 self.normallookup(f)
361 361 # Also fix up otherparent markers
362 362 elif s[0] == 'n' and s[2] == -2:
363 363 if f in self._copymap:
364 364 copies[f] = self._copymap[f]
365 365 self.add(f)
366 366 return copies
367 367
368 368 def setbranch(self, branch):
369 369 self._branch = encoding.fromlocal(branch)
370 370 f = self._opener('branch', 'w', atomictemp=True)
371 371 try:
372 372 f.write(self._branch + '\n')
373 373 f.close()
374 374
375 375 # make sure filecache has the correct stat info for _branch after
376 376 # replacing the underlying file
377 377 ce = self._filecache['_branch']
378 378 if ce:
379 379 ce.refresh()
380 380 except: # re-raises
381 381 f.discard()
382 382 raise
383 383
384 384 def _opendirstatefile(self):
385 385 fp, mode = _trypending(self._root, self._opener, self._filename)
386 386 if self._pendingmode is not None and self._pendingmode != mode:
387 387 fp.close()
388 388 raise error.Abort(_('working directory state may be '
389 389 'changed parallelly'))
390 390 self._pendingmode = mode
391 391 return fp
392 392
393 393 def _read(self):
394 394 self._map = {}
395 395 self._copymap = {}
396 396 try:
397 397 fp = self._opendirstatefile()
398 398 try:
399 399 st = fp.read()
400 400 finally:
401 401 fp.close()
402 402 except IOError as err:
403 403 if err.errno != errno.ENOENT:
404 404 raise
405 405 return
406 406 if not st:
407 407 return
408 408
409 409 if util.safehasattr(parsers, 'dict_new_presized'):
410 410 # Make an estimate of the number of files in the dirstate based on
411 411 # its size. From a linear regression on a set of real-world repos,
412 412 # all over 10,000 files, the size of a dirstate entry is 85
413 413 # bytes. The cost of resizing is significantly higher than the cost
414 414 # of filling in a larger presized dict, so subtract 20% from the
415 415 # size.
416 416 #
417 417 # This heuristic is imperfect in many ways, so in a future dirstate
418 418 # format update it makes sense to just record the number of entries
419 419 # on write.
420 420 self._map = parsers.dict_new_presized(len(st) / 71)
421 421
422 422 # Python's garbage collector triggers a GC each time a certain number
423 423 # of container objects (the number being defined by
424 424 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
425 425 # for each file in the dirstate. The C version then immediately marks
426 426 # them as not to be tracked by the collector. However, this has no
427 427 # effect on when GCs are triggered, only on what objects the GC looks
428 428 # into. This means that O(number of files) GCs are unavoidable.
429 429 # Depending on when in the process's lifetime the dirstate is parsed,
430 430 # this can get very expensive. As a workaround, disable GC while
431 431 # parsing the dirstate.
432 432 #
433 433 # (we cannot decorate the function directly since it is in a C module)
434 434 parse_dirstate = util.nogc(parsers.parse_dirstate)
435 435 p = parse_dirstate(self._map, self._copymap, st)
436 436 if not self._dirtypl:
437 437 self._pl = p
438 438
439 439 def invalidate(self):
440 440 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
441 441 "_pl", "_dirs", "_ignore", "_nonnormalset"):
442 442 if a in self.__dict__:
443 443 delattr(self, a)
444 444 self._lastnormaltime = 0
445 445 self._dirty = False
446 446 self._parentwriters = 0
447 447
448 448 def copy(self, source, dest):
449 449 """Mark dest as a copy of source. Unmark dest if source is None."""
450 450 if source == dest:
451 451 return
452 452 self._dirty = True
453 453 if source is not None:
454 454 self._copymap[dest] = source
455 455 elif dest in self._copymap:
456 456 del self._copymap[dest]
457 457
458 458 def copied(self, file):
459 459 return self._copymap.get(file, None)
460 460
461 461 def copies(self):
462 462 return self._copymap
463 463
464 464 def _droppath(self, f):
465 465 if self[f] not in "?r" and "_dirs" in self.__dict__:
466 466 self._dirs.delpath(f)
467 467
468 468 if "_filefoldmap" in self.__dict__:
469 469 normed = util.normcase(f)
470 470 if normed in self._filefoldmap:
471 471 del self._filefoldmap[normed]
472 472
473 473 def _addpath(self, f, state, mode, size, mtime):
474 474 oldstate = self[f]
475 475 if state == 'a' or oldstate == 'r':
476 476 scmutil.checkfilename(f)
477 477 if f in self._dirs:
478 478 raise error.Abort(_('directory %r already in dirstate') % f)
479 479 # shadows
480 480 for d in util.finddirs(f):
481 481 if d in self._dirs:
482 482 break
483 483 if d in self._map and self[d] != 'r':
484 484 raise error.Abort(
485 485 _('file %r in dirstate clashes with %r') % (d, f))
486 486 if oldstate in "?r" and "_dirs" in self.__dict__:
487 487 self._dirs.addpath(f)
488 488 self._dirty = True
489 489 self._map[f] = dirstatetuple(state, mode, size, mtime)
490 490 if state != 'n' or mtime == -1:
491 491 self._nonnormalset.add(f)
492 492
493 493 def normal(self, f):
494 494 '''Mark a file normal and clean.'''
495 495 s = os.lstat(self._join(f))
496 496 mtime = s.st_mtime
497 497 self._addpath(f, 'n', s.st_mode,
498 498 s.st_size & _rangemask, mtime & _rangemask)
499 499 if f in self._copymap:
500 500 del self._copymap[f]
501 501 if f in self._nonnormalset:
502 502 self._nonnormalset.remove(f)
503 503 if mtime > self._lastnormaltime:
504 504 # Remember the most recent modification timeslot for status(),
505 505 # to make sure we won't miss future size-preserving file content
506 506 # modifications that happen within the same timeslot.
507 507 self._lastnormaltime = mtime
508 508
509 509 def normallookup(self, f):
510 510 '''Mark a file normal, but possibly dirty.'''
511 511 if self._pl[1] != nullid and f in self._map:
512 512 # if there is a merge going on and the file was either
513 513 # in state 'm' (-1) or coming from other parent (-2) before
514 514 # being removed, restore that state.
515 515 entry = self._map[f]
516 516 if entry[0] == 'r' and entry[2] in (-1, -2):
517 517 source = self._copymap.get(f)
518 518 if entry[2] == -1:
519 519 self.merge(f)
520 520 elif entry[2] == -2:
521 521 self.otherparent(f)
522 522 if source:
523 523 self.copy(source, f)
524 524 return
525 525 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
526 526 return
527 527 self._addpath(f, 'n', 0, -1, -1)
528 528 if f in self._copymap:
529 529 del self._copymap[f]
530 530 if f in self._nonnormalset:
531 531 self._nonnormalset.remove(f)
532 532
533 533 def otherparent(self, f):
534 534 '''Mark as coming from the other parent, always dirty.'''
535 535 if self._pl[1] == nullid:
536 536 raise error.Abort(_("setting %r to other parent "
537 537 "only allowed in merges") % f)
538 538 if f in self and self[f] == 'n':
539 539 # merge-like
540 540 self._addpath(f, 'm', 0, -2, -1)
541 541 else:
542 542 # add-like
543 543 self._addpath(f, 'n', 0, -2, -1)
544 544
545 545 if f in self._copymap:
546 546 del self._copymap[f]
547 547
548 548 def add(self, f):
549 549 '''Mark a file added.'''
550 550 self._addpath(f, 'a', 0, -1, -1)
551 551 if f in self._copymap:
552 552 del self._copymap[f]
553 553
554 554 def remove(self, f):
555 555 '''Mark a file removed.'''
556 556 self._dirty = True
557 557 self._droppath(f)
558 558 size = 0
559 559 if self._pl[1] != nullid and f in self._map:
560 560 # backup the previous state
561 561 entry = self._map[f]
562 562 if entry[0] == 'm': # merge
563 563 size = -1
564 564 elif entry[0] == 'n' and entry[2] == -2: # other parent
565 565 size = -2
566 566 self._map[f] = dirstatetuple('r', 0, size, 0)
567 567 self._nonnormalset.add(f)
568 568 if size == 0 and f in self._copymap:
569 569 del self._copymap[f]
570 570
571 571 def merge(self, f):
572 572 '''Mark a file merged.'''
573 573 if self._pl[1] == nullid:
574 574 return self.normallookup(f)
575 575 return self.otherparent(f)
576 576
577 577 def drop(self, f):
578 578 '''Drop a file from the dirstate'''
579 579 if f in self._map:
580 580 self._dirty = True
581 581 self._droppath(f)
582 582 del self._map[f]
583 583 if f in self._nonnormalset:
584 584 self._nonnormalset.remove(f)
585 if f in self._copymap:
586 del self._copymap[f]
585 587
586 588 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
587 589 if exists is None:
588 590 exists = os.path.lexists(os.path.join(self._root, path))
589 591 if not exists:
590 592 # Maybe a path component exists
591 593 if not ignoremissing and '/' in path:
592 594 d, f = path.rsplit('/', 1)
593 595 d = self._normalize(d, False, ignoremissing, None)
594 596 folded = d + "/" + f
595 597 else:
596 598 # No path components, preserve original case
597 599 folded = path
598 600 else:
599 601 # recursively normalize leading directory components
600 602 # against dirstate
601 603 if '/' in normed:
602 604 d, f = normed.rsplit('/', 1)
603 605 d = self._normalize(d, False, ignoremissing, True)
604 606 r = self._root + "/" + d
605 607 folded = d + "/" + util.fspath(f, r)
606 608 else:
607 609 folded = util.fspath(normed, self._root)
608 610 storemap[normed] = folded
609 611
610 612 return folded
611 613
612 614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
613 615 normed = util.normcase(path)
614 616 folded = self._filefoldmap.get(normed, None)
615 617 if folded is None:
616 618 if isknown:
617 619 folded = path
618 620 else:
619 621 folded = self._discoverpath(path, normed, ignoremissing, exists,
620 622 self._filefoldmap)
621 623 return folded
622 624
623 625 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
624 626 normed = util.normcase(path)
625 627 folded = self._filefoldmap.get(normed, None)
626 628 if folded is None:
627 629 folded = self._dirfoldmap.get(normed, None)
628 630 if folded is None:
629 631 if isknown:
630 632 folded = path
631 633 else:
632 634 # store discovered result in dirfoldmap so that future
633 635 # normalizefile calls don't start matching directories
634 636 folded = self._discoverpath(path, normed, ignoremissing, exists,
635 637 self._dirfoldmap)
636 638 return folded
637 639
638 640 def normalize(self, path, isknown=False, ignoremissing=False):
639 641 '''
640 642 normalize the case of a pathname when on a casefolding filesystem
641 643
642 644 isknown specifies whether the filename came from walking the
643 645 disk, to avoid extra filesystem access.
644 646
645 647 If ignoremissing is True, missing paths are returned
646 648 unchanged. Otherwise, we try harder to normalize possibly
647 649 existing path components.
648 650
649 651 The normalized case is determined based on the following precedence:
650 652
651 653 - version of name already stored in the dirstate
652 654 - version of name stored on disk
653 655 - version provided via command arguments
654 656 '''
655 657
656 658 if self._checkcase:
657 659 return self._normalize(path, isknown, ignoremissing)
658 660 return path
659 661
660 662 def clear(self):
661 663 self._map = {}
662 664 self._nonnormalset = set()
663 665 if "_dirs" in self.__dict__:
664 666 delattr(self, "_dirs")
665 667 self._copymap = {}
666 668 self._pl = [nullid, nullid]
667 669 self._lastnormaltime = 0
668 670 self._dirty = True
669 671
670 672 def rebuild(self, parent, allfiles, changedfiles=None):
671 673 if changedfiles is None:
672 674 # Rebuild entire dirstate
673 675 changedfiles = allfiles
674 676 lastnormaltime = self._lastnormaltime
675 677 self.clear()
676 678 self._lastnormaltime = lastnormaltime
677 679
678 680 for f in changedfiles:
679 681 mode = 0o666
680 682 if f in allfiles and 'x' in allfiles.flags(f):
681 683 mode = 0o777
682 684
683 685 if f in allfiles:
684 686 self._map[f] = dirstatetuple('n', mode, -1, 0)
685 687 else:
686 688 self._map.pop(f, None)
687 689 if f in self._nonnormalset:
688 690 self._nonnormalset.remove(f)
689 691
690 692 self._pl = (parent, nullid)
691 693 self._dirty = True
692 694
693 695 def write(self, tr=_token):
694 696 if not self._dirty:
695 697 return
696 698
697 699 filename = self._filename
698 700 if tr is _token: # not explicitly specified
699 701 self._ui.deprecwarn('use dirstate.write with '
700 702 'repo.currenttransaction()',
701 703 '3.9')
702 704
703 705 if self._opener.lexists(self._pendingfilename):
704 706 # if pending file already exists, in-memory changes
705 707 # should be written into it, because it takes priority
706 708 # over '.hg/dirstate' when reading under HG_PENDING mode
707 709 filename = self._pendingfilename
708 710 elif tr:
709 711 # 'dirstate.write()' is not only for writing in-memory
710 712 # changes out, but also for dropping ambiguous timestamps;
711 713 # delaying the write would re-introduce the "ambiguous timestamp" issue.
712 714 # See also the wiki page below for detail:
713 715 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
714 716
715 717 # emulate dropping timestamp in 'parsers.pack_dirstate'
716 718 now = _getfsnow(self._opener)
717 719 dmap = self._map
718 720 for f, e in dmap.iteritems():
719 721 if e[0] == 'n' and e[3] == now:
720 722 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
721 723 self._nonnormalset.add(f)
722 724
723 725 # emulate that all 'dirstate.normal' results are written out
724 726 self._lastnormaltime = 0
725 727
726 728 # delay writing in-memory changes out
727 729 tr.addfilegenerator('dirstate', (self._filename,),
728 730 self._writedirstate, location='plain')
729 731 return
730 732
731 733 st = self._opener(filename, "w", atomictemp=True)
732 734 self._writedirstate(st)
733 735
734 736 def _writedirstate(self, st):
735 737 # use the modification time of the newly created temporary file as the
736 738 # filesystem's notion of 'now'
737 739 now = util.fstat(st).st_mtime & _rangemask
738 740
739 741 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
740 742 # the timestamp of each entry in the dirstate, because of 'now > mtime'
741 743 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
742 744 if delaywrite > 0:
743 745 # do we have any files to delay for?
744 746 for f, e in self._map.iteritems():
745 747 if e[0] == 'n' and e[3] == now:
746 748 import time # to avoid useless import
747 749 # rather than sleep n seconds, sleep until the next
748 750 # multiple of n seconds
749 751 clock = time.time()
750 752 start = int(clock) - (int(clock) % delaywrite)
751 753 end = start + delaywrite
752 754 time.sleep(end - clock)
753 755 break
754 756
755 757 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
756 758 self._nonnormalset = nonnormalentries(self._map)
757 759 st.close()
758 760 self._lastnormaltime = 0
759 761 self._dirty = self._dirtypl = False
760 762
761 763 def _dirignore(self, f):
762 764 if f == '.':
763 765 return False
764 766 if self._ignore(f):
765 767 return True
766 768 for p in util.finddirs(f):
767 769 if self._ignore(p):
768 770 return True
769 771 return False
770 772
771 773 def _ignorefiles(self):
772 774 files = []
773 775 if os.path.exists(self._join('.hgignore')):
774 776 files.append(self._join('.hgignore'))
775 777 for name, path in self._ui.configitems("ui"):
776 778 if name == 'ignore' or name.startswith('ignore.'):
777 779 # we need to use os.path.join here rather than self._join
778 780 # because path is arbitrary and user-specified
779 781 files.append(os.path.join(self._rootdir, util.expandpath(path)))
780 782 return files
781 783
782 784 def _ignorefileandline(self, f):
783 785 files = collections.deque(self._ignorefiles())
784 786 visited = set()
785 787 while files:
786 788 i = files.popleft()
787 789 patterns = matchmod.readpatternfile(i, self._ui.warn,
788 790 sourceinfo=True)
789 791 for pattern, lineno, line in patterns:
790 792 kind, p = matchmod._patsplit(pattern, 'glob')
791 793 if kind == "subinclude":
792 794 if p not in visited:
793 795 files.append(p)
794 796 continue
795 797 m = matchmod.match(self._root, '', [], [pattern],
796 798 warn=self._ui.warn)
797 799 if m(f):
798 800 return (i, lineno, line)
799 801 visited.add(i)
800 802 return (None, -1, "")
801 803
802 804 def _walkexplicit(self, match, subrepos):
803 805 '''Get stat data about the files explicitly specified by match.
804 806
805 807 Return a triple (results, dirsfound, dirsnotfound).
806 808 - results is a mapping from filename to stat result. It also contains
807 809 listings mapping subrepos and .hg to None.
808 810 - dirsfound is a list of files found to be directories.
809 811 - dirsnotfound is a list of files that the dirstate thinks are
810 812 directories and that were not found.'''
811 813
812 814 def badtype(mode):
813 815 kind = _('unknown')
814 816 if stat.S_ISCHR(mode):
815 817 kind = _('character device')
816 818 elif stat.S_ISBLK(mode):
817 819 kind = _('block device')
818 820 elif stat.S_ISFIFO(mode):
819 821 kind = _('fifo')
820 822 elif stat.S_ISSOCK(mode):
821 823 kind = _('socket')
822 824 elif stat.S_ISDIR(mode):
823 825 kind = _('directory')
824 826 return _('unsupported file type (type is %s)') % kind
825 827
826 828 matchedir = match.explicitdir
827 829 badfn = match.bad
828 830 dmap = self._map
829 831 lstat = os.lstat
830 832 getkind = stat.S_IFMT
831 833 dirkind = stat.S_IFDIR
832 834 regkind = stat.S_IFREG
833 835 lnkkind = stat.S_IFLNK
834 836 join = self._join
835 837 dirsfound = []
836 838 foundadd = dirsfound.append
837 839 dirsnotfound = []
838 840 notfoundadd = dirsnotfound.append
839 841
840 842 if not match.isexact() and self._checkcase:
841 843 normalize = self._normalize
842 844 else:
843 845 normalize = None
844 846
845 847 files = sorted(match.files())
846 848 subrepos.sort()
847 849 i, j = 0, 0
848 850 while i < len(files) and j < len(subrepos):
849 851 subpath = subrepos[j] + "/"
850 852 if files[i] < subpath:
851 853 i += 1
852 854 continue
853 855 while i < len(files) and files[i].startswith(subpath):
854 856 del files[i]
855 857 j += 1
856 858
857 859 if not files or '.' in files:
858 860 files = ['.']
859 861 results = dict.fromkeys(subrepos)
860 862 results['.hg'] = None
861 863
862 864 alldirs = None
863 865 for ff in files:
864 866 # constructing the foldmap is expensive, so don't do it for the
865 867 # common case where files is ['.']
866 868 if normalize and ff != '.':
867 869 nf = normalize(ff, False, True)
868 870 else:
869 871 nf = ff
870 872 if nf in results:
871 873 continue
872 874
873 875 try:
874 876 st = lstat(join(nf))
875 877 kind = getkind(st.st_mode)
876 878 if kind == dirkind:
877 879 if nf in dmap:
878 880 # file replaced by dir on disk but still in dirstate
879 881 results[nf] = None
880 882 if matchedir:
881 883 matchedir(nf)
882 884 foundadd((nf, ff))
883 885 elif kind == regkind or kind == lnkkind:
884 886 results[nf] = st
885 887 else:
886 888 badfn(ff, badtype(kind))
887 889 if nf in dmap:
888 890 results[nf] = None
889 891 except OSError as inst: # nf not found on disk - it is dirstate only
890 892 if nf in dmap: # does it exactly match a missing file?
891 893 results[nf] = None
892 894 else: # does it match a missing directory?
893 895 if alldirs is None:
894 896 alldirs = util.dirs(dmap)
895 897 if nf in alldirs:
896 898 if matchedir:
897 899 matchedir(nf)
898 900 notfoundadd(nf)
899 901 else:
900 902 badfn(ff, inst.strerror)
901 903
902 904 # Case insensitive filesystems cannot rely on lstat() failing to detect
903 905 # a case-only rename. Prune the stat object for any file that does not
904 906 # match the case in the filesystem, if there are multiple files that
905 907 # normalize to the same path.
906 908 if match.isexact() and self._checkcase:
907 909 normed = {}
908 910
909 911 for f, st in results.iteritems():
910 912 if st is None:
911 913 continue
912 914
913 915 nc = util.normcase(f)
914 916 paths = normed.get(nc)
915 917
916 918 if paths is None:
917 919 paths = set()
918 920 normed[nc] = paths
919 921
920 922 paths.add(f)
921 923
922 924 for norm, paths in normed.iteritems():
923 925 if len(paths) > 1:
924 926 for path in paths:
925 927 folded = self._discoverpath(path, norm, True, None,
926 928 self._dirfoldmap)
927 929 if path != folded:
928 930 results[path] = None
929 931
930 932 return results, dirsfound, dirsnotfound
931 933
932 934 def walk(self, match, subrepos, unknown, ignored, full=True):
933 935 '''
934 936 Walk recursively through the directory tree, finding all files
935 937 matched by match.
936 938
937 939 If full is False, maybe skip some known-clean files.
938 940
939 941 Return a dict mapping filename to stat-like object (either
940 942 mercurial.osutil.stat instance or return value of os.stat()).
941 943
942 944 '''
943 945 # full is a flag that extensions that hook into walk can use -- this
944 946 # implementation doesn't use it at all. This satisfies the contract
945 947 # because we only guarantee a "maybe".
946 948
947 949 if ignored:
948 950 ignore = util.never
949 951 dirignore = util.never
950 952 elif unknown:
951 953 ignore = self._ignore
952 954 dirignore = self._dirignore
953 955 else:
954 956 # if not unknown and not ignored, drop dir recursion and step 2
955 957 ignore = util.always
956 958 dirignore = util.always
957 959
958 960 matchfn = match.matchfn
959 961 matchalways = match.always()
960 962 matchtdir = match.traversedir
961 963 dmap = self._map
962 964 listdir = osutil.listdir
963 965 lstat = os.lstat
964 966 dirkind = stat.S_IFDIR
965 967 regkind = stat.S_IFREG
966 968 lnkkind = stat.S_IFLNK
967 969 join = self._join
968 970
969 971 exact = skipstep3 = False
970 972 if match.isexact(): # match.exact
971 973 exact = True
972 974 dirignore = util.always # skip step 2
973 975 elif match.prefix(): # match.match, no patterns
974 976 skipstep3 = True
975 977
976 978 if not exact and self._checkcase:
977 979 normalize = self._normalize
978 980 normalizefile = self._normalizefile
979 981 skipstep3 = False
980 982 else:
981 983 normalize = self._normalize
982 984 normalizefile = None
983 985
984 986 # step 1: find all explicit files
985 987 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
986 988
987 989 skipstep3 = skipstep3 and not (work or dirsnotfound)
988 990 work = [d for d in work if not dirignore(d[0])]
989 991
990 992 # step 2: visit subdirectories
991 993 def traverse(work, alreadynormed):
992 994 wadd = work.append
993 995 while work:
994 996 nd = work.pop()
995 997 skip = None
996 998 if nd == '.':
997 999 nd = ''
998 1000 else:
999 1001 skip = '.hg'
1000 1002 try:
1001 1003 entries = listdir(join(nd), stat=True, skip=skip)
1002 1004 except OSError as inst:
1003 1005 if inst.errno in (errno.EACCES, errno.ENOENT):
1004 1006 match.bad(self.pathto(nd), inst.strerror)
1005 1007 continue
1006 1008 raise
1007 1009 for f, kind, st in entries:
1008 1010 if normalizefile:
1009 1011 # even though f might be a directory, we're only
1010 1012 # interested in comparing it to files currently in the
1011 1013 # dmap -- therefore normalizefile is enough
1012 1014 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1013 1015 True)
1014 1016 else:
1015 1017 nf = nd and (nd + "/" + f) or f
1016 1018 if nf not in results:
1017 1019 if kind == dirkind:
1018 1020 if not ignore(nf):
1019 1021 if matchtdir:
1020 1022 matchtdir(nf)
1021 1023 wadd(nf)
1022 1024 if nf in dmap and (matchalways or matchfn(nf)):
1023 1025 results[nf] = None
1024 1026 elif kind == regkind or kind == lnkkind:
1025 1027 if nf in dmap:
1026 1028 if matchalways or matchfn(nf):
1027 1029 results[nf] = st
1028 1030 elif ((matchalways or matchfn(nf))
1029 1031 and not ignore(nf)):
1030 1032 # unknown file -- normalize if necessary
1031 1033 if not alreadynormed:
1032 1034 nf = normalize(nf, False, True)
1033 1035 results[nf] = st
1034 1036 elif nf in dmap and (matchalways or matchfn(nf)):
1035 1037 results[nf] = None
1036 1038
1037 1039 for nd, d in work:
1038 1040 # alreadynormed means that processwork doesn't have to do any
1039 1041 # expensive directory normalization
1040 1042 alreadynormed = not normalize or nd == d
1041 1043 traverse([d], alreadynormed)
1042 1044
1043 1045 for s in subrepos:
1044 1046 del results[s]
1045 1047 del results['.hg']
1046 1048
1047 1049 # step 3: visit remaining files from dmap
1048 1050 if not skipstep3 and not exact:
1049 1051 # If a dmap file is not in results yet, it was either
1050 1052 # a) not matching matchfn b) ignored, c) missing, or d) under a
1051 1053 # symlink directory.
1052 1054 if not results and matchalways:
1053 1055 visit = dmap.keys()
1054 1056 else:
1055 1057 visit = [f for f in dmap if f not in results and matchfn(f)]
1056 1058 visit.sort()
1057 1059
1058 1060 if unknown:
1059 1061 # unknown == True means we walked all dirs under the roots
1060 1062 # that weren't ignored, and everything that matched was stat'ed
1061 1063 # and is already in results.
1062 1064 # The rest must thus be ignored or under a symlink.
1063 1065 audit_path = pathutil.pathauditor(self._root)
1064 1066
1065 1067 for nf in iter(visit):
1066 1068 # If a stat for the same file was already added with a
1067 1069 # different case, don't add one for this, since that would
1068 1070 # make it appear as if the file exists under both names
1069 1071 # on disk.
1070 1072 if (normalizefile and
1071 1073 normalizefile(nf, True, True) in results):
1072 1074 results[nf] = None
1073 1075 # Report ignored items in the dmap as long as they are not
1074 1076 # under a symlink directory.
1075 1077 elif audit_path.check(nf):
1076 1078 try:
1077 1079 results[nf] = lstat(join(nf))
1078 1080 # file was just ignored, no links, and exists
1079 1081 except OSError:
1080 1082 # file doesn't exist
1081 1083 results[nf] = None
1082 1084 else:
1083 1085 # It's either missing or under a symlink directory
1084 1086 # which we in this case report as missing
1085 1087 results[nf] = None
1086 1088 else:
1087 1089 # We may not have walked the full directory tree above,
1088 1090 # so stat and check everything we missed.
1089 1091 nf = iter(visit).next
1090 1092 for st in util.statfiles([join(i) for i in visit]):
1091 1093 results[nf()] = st
1092 1094 return results
1093 1095
1094 1096 def status(self, match, subrepos, ignored, clean, unknown):
1095 1097 '''Determine the status of the working copy relative to the
1096 1098 dirstate and return a pair of (unsure, status), where status is of type
1097 1099 scmutil.status and:
1098 1100
1099 1101 unsure:
1100 1102 files that might have been modified since the dirstate was
1101 1103 written, but need to be read to be sure (size is the same
1102 1104 but mtime differs)
1103 1105 status.modified:
1104 1106 files that have definitely been modified since the dirstate
1105 1107 was written (different size or mode)
1106 1108 status.clean:
1107 1109 files that have definitely not been modified since the
1108 1110 dirstate was written
1109 1111 '''
1110 1112 listignored, listclean, listunknown = ignored, clean, unknown
1111 1113 lookup, modified, added, unknown, ignored = [], [], [], [], []
1112 1114 removed, deleted, clean = [], [], []
1113 1115
1114 1116 dmap = self._map
1115 1117 ladd = lookup.append # aka "unsure"
1116 1118 madd = modified.append
1117 1119 aadd = added.append
1118 1120 uadd = unknown.append
1119 1121 iadd = ignored.append
1120 1122 radd = removed.append
1121 1123 dadd = deleted.append
1122 1124 cadd = clean.append
1123 1125 mexact = match.exact
1124 1126 dirignore = self._dirignore
1125 1127 checkexec = self._checkexec
1126 1128 copymap = self._copymap
1127 1129 lastnormaltime = self._lastnormaltime
1128 1130
1129 1131 # We need to do full walks when either
1130 1132 # - we're listing all clean files, or
1131 1133 # - match.traversedir does something, because match.traversedir should
1132 1134 # be called for every dir in the working dir
1133 1135 full = listclean or match.traversedir is not None
1134 1136 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1135 1137 full=full).iteritems():
1136 1138 if fn not in dmap:
1137 1139 if (listignored or mexact(fn)) and dirignore(fn):
1138 1140 if listignored:
1139 1141 iadd(fn)
1140 1142 else:
1141 1143 uadd(fn)
1142 1144 continue
1143 1145
1144 1146 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1145 1147 # written like that for performance reasons. dmap[fn] is not a
1146 1148 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1147 1149 # opcode has fast paths when the value to be unpacked is a tuple or
1148 1150 # a list, but falls back to creating a full-fledged iterator in
1149 1151 # general. That is much slower than simply accessing and storing the
1150 1152 # tuple members one by one.
1151 1153 t = dmap[fn]
1152 1154 state = t[0]
1153 1155 mode = t[1]
1154 1156 size = t[2]
1155 1157 time = t[3]
1156 1158
1157 1159 if not st and state in "nma":
1158 1160 dadd(fn)
1159 1161 elif state == 'n':
1160 1162 if (size >= 0 and
1161 1163 ((size != st.st_size and size != st.st_size & _rangemask)
1162 1164 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1163 1165 or size == -2 # other parent
1164 1166 or fn in copymap):
1165 1167 madd(fn)
1166 1168 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1167 1169 ladd(fn)
1168 1170 elif st.st_mtime == lastnormaltime:
1169 1171 # fn may have just been marked as normal and it may have
1170 1172 # changed in the same second without changing its size.
1171 1173 # This can happen if we quickly do multiple commits.
1172 1174 # Force lookup, so we don't miss such a racy file change.
1173 1175 ladd(fn)
1174 1176 elif listclean:
1175 1177 cadd(fn)
1176 1178 elif state == 'm':
1177 1179 madd(fn)
1178 1180 elif state == 'a':
1179 1181 aadd(fn)
1180 1182 elif state == 'r':
1181 1183 radd(fn)
1182 1184
1183 1185 return (lookup, scmutil.status(modified, added, removed, deleted,
1184 1186 unknown, ignored, clean))
1185 1187
1186 1188 def matches(self, match):
1187 1189 '''
1188 1190 return files in the dirstate (in whatever state) filtered by match
1189 1191 '''
1190 1192 dmap = self._map
1191 1193 if match.always():
1192 1194 return dmap.keys()
1193 1195 files = match.files()
1194 1196 if match.isexact():
1195 1197 # fast path -- filter the other way around, since typically files is
1196 1198 # much smaller than dmap
1197 1199 return [f for f in files if f in dmap]
1198 1200 if match.prefix() and all(fn in dmap for fn in files):
1199 1201 # fast path -- all the values are known to be files, so just return
1200 1202 # that
1201 1203 return list(files)
1202 1204 return [f for f in dmap if match(f)]
1203 1205
1204 1206 def _actualfilename(self, tr):
1205 1207 if tr:
1206 1208 return self._pendingfilename
1207 1209 else:
1208 1210 return self._filename
1209 1211
1210 1212 def savebackup(self, tr, suffix='', prefix=''):
1211 1213 '''Save current dirstate into backup file with suffix'''
1212 1214 filename = self._actualfilename(tr)
1213 1215
1214 1216 # use '_writedirstate' instead of 'write' to write changes certainly,
1215 1217 # because the latter omits writing out if transaction is running.
1216 1218 # output file will be used to create backup of dirstate at this point.
1217 1219 self._writedirstate(self._opener(filename, "w", atomictemp=True))
1218 1220
1219 1221 if tr:
1220 1222 # ensure that subsequent tr.writepending returns True for
1221 1223 # changes written out above, even if dirstate is never
1222 1224 # changed after this
1223 1225 tr.addfilegenerator('dirstate', (self._filename,),
1224 1226 self._writedirstate, location='plain')
1225 1227
1226 1228 # ensure that pending file written above is unlinked at
1227 1229 # failure, even if tr.writepending isn't invoked until the
1228 1230 # end of this transaction
1229 1231 tr.registertmp(filename, location='plain')
1230 1232
1231 1233 self._opener.write(prefix + filename + suffix,
1232 1234 self._opener.tryread(filename))
1233 1235
1234 1236 def restorebackup(self, tr, suffix='', prefix=''):
1235 1237 '''Restore dirstate by backup file with suffix'''
1236 1238 # this "invalidate()" prevents "wlock.release()" from writing
1237 1239 # changes of dirstate out after restoring from backup file
1238 1240 self.invalidate()
1239 1241 filename = self._actualfilename(tr)
1240 1242 self._opener.rename(prefix + filename + suffix, filename)
1241 1243
1242 1244 def clearbackup(self, tr, suffix='', prefix=''):
1243 1245 '''Clear backup file with suffix'''
1244 1246 filename = self._actualfilename(tr)
1245 1247 self._opener.unlink(prefix + filename + suffix)
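One detail in _writedirstate above that is easy to misread is the debug.dirstate.delaywrite handling: rather than sleeping a fixed number of seconds, it sleeps until the wall clock reaches the next multiple of delaywrite, so the newly written file's mtime lands in a later timeslot than the entries just recorded. A standalone sketch of that rounding (the helper name is made up for illustration):

    import time

    def sleep_until_next_slot(delaywrite):
        # Mirrors the arithmetic in _writedirstate: sleep until the clock
        # crosses the next multiple of 'delaywrite' seconds.
        clock = time.time()
        start = int(clock) - (int(clock) % delaywrite)  # start of current slot
        end = start + delaywrite                        # start of next slot
        time.sleep(end - clock)
        return end

    # Example: with delaywrite=2 and clock=100.3, start=100 and end=102,
    # so the call sleeps about 1.7 seconds and returns at the 102s boundary.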