##// END OF EJS Templates
scmutil: migrate finddirs from dirstate
Bryan O'Sullivan
r18897:38982de2 default
parent child Browse files
Show More
@@ -1,838 +1,832 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
from node import nullid
from i18n import _
# errno was previously imported twice (once standalone, once below);
# the combined import is sufficient.
import scmutil, util, ignore, osutil, parsers, encoding
import os, stat, errno, gc

propertycache = util.propertycache
filecache = scmutil.filecache
# mask used to fold size/mtime values into a signed 32-bit range
_rangemask = 0x7fffffff

class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # resolve fname via the repository's .hg opener
        opener = obj._opener
        return opener.join(fname)
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        joiner = obj._join
        return joiner(fname)
28 def _finddirs(path):
29 pos = path.rfind('/')
30 while pos != -1:
31 yield path[:pos]
32 pos = path.rfind('/', 0, pos)
33
34 28 def _incdirs(dirs, path):
35 for base in _finddirs(path):
29 for base in scmutil.finddirs(path):
36 30 if base in dirs:
37 31 dirs[base] += 1
38 32 return
39 33 dirs[base] = 1
40 34
41 35 def _decdirs(dirs, path):
42 for base in _finddirs(path):
36 for base in scmutil.finddirs(path):
43 37 if dirs[base] > 1:
44 38 dirs[base] -= 1
45 39 return
46 40 del dirs[base]
47 41
class dirstate(object):
    """Track the working directory's state against the repository.

    Maps each tracked filename to a (state, mode, size, mtime) tuple,
    where state is 'n' (normal), 'a' (added), 'r' (removed) or 'm'
    (merged).  Also records copy sources and the working directory's
    parent changesets.
    """

    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # rootdir keeps a trailing separator so _join() is a plain concat
        self._rootdir = os.path.join(root, '')
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}

    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        # map of destination -> copy source; populated by _read()
        self._read()
        return self._copymap

    @propertycache
    def _foldmap(self):
        # case-folded name -> canonical name, for case-insensitive
        # filesystems; covers both files and their parent directories
        f = {}
        for name in self._map:
            f[util.normcase(name)] = name
        for name in self._dirs:
            f[util.normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @repocache('branch')
    def _branch(self):
        # current branch name as stored in .hg/branch; a missing file
        # means the default branch
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return "default"

    @propertycache
    def _pl(self):
        # the two parent nodes, read from the first 40 bytes of the
        # dirstate file; a missing file means a null state
        try:
            fp = self._opener("dirstate")
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                raise util.Abort(_('working directory state appears damaged!'))
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]

    @propertycache
    def _dirs(self):
        # reference-counted map of tracked directories ('r' entries are
        # scheduled for removal and do not count)
        dirs = {}
        for f, s in self._map.iteritems():
            if s[0] != 'r':
                _incdirs(dirs, f)
        return dirs

    def dirs(self):
        """Return the reference-counted directory map."""
        return self._dirs

    @rootcache('.hgignore')
    def _ignore(self):
        # matcher built from .hgignore plus any ui.ignore* config files
        files = [self._join('.hgignore')]
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                files.append(util.expandpath(path))
        return ignore.ignore(self._root, files, self._ui.warn)

    @propertycache
    def _slash(self):
        # True when paths should be reported with '/' despite os.sep
        return self._ui.configbool('ui', 'slash') and os.sep != '/'

    @propertycache
    def _checklink(self):
        # does the filesystem support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the filesystem track the executable bit?
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems
        return not util.checkcase(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flag string.

        Yields 'l' for symlinks and 'x' for executables when the
        filesystem can report them directly; otherwise consults the
        fallback produced by buildfallback() for the missing capability.
        """
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback

    def getcwd(self):
        """Return the current directory relative to the repo root,
        or an absolute path when outside the repository."""
        cwd = os.getcwd()
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        """Return repo-relative path f expressed relative to cwd."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.normpath(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        # iterate tracked filenames in sorted order
        for x in sorted(self._map):
            yield x

    def iteritems(self):
        return self._map.iteritems()

    def parents(self):
        """Return both parent nodes, passed through the validator."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # Discard 'm' markers when moving away from a merge state
            for f, s in self._map.iteritems():
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
        return copies

    def setbranch(self, branch):
        """Persist branch (in local encoding) to .hg/branch."""
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise

    def _read(self):
        # parse .hg/dirstate into _map and _copymap; a missing or empty
        # file leaves both empty
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener.read("dirstate")
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            p = parsers.parse_dirstate(self._map, self._copymap, st)
        finally:
            if gcenabled:
                gc.enable()
        if not self._dirtypl:
            self._pl = p

    def invalidate(self):
        """Discard all cached state so it is re-read on next access."""
        for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
                  "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]

    def copied(self, file):
        # copy source for file, or None when not a copy
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap

    def _droppath(self, f):
        # keep the _dirs refcounts in sync when f leaves tracked state
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            _decdirs(self._dirs, f)

    def _addpath(self, f, state, mode, size, mtime):
        # Record f with the given entry, validating the name and checking
        # for file/directory clashes when f is (re)entering tracked state.
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in scmutil.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            _incdirs(self._dirs, f)
        self._dirty = True
        self._map[f] = (state, mode, size, mtime)

    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = int(s.st_mtime)
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        # size -1 / mtime -1 force a content comparison on next status
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise util.Abort(_("setting %r to other parent "
                               "only allowed in merges") % f)
        # size -2 encodes "from other parent"
        self._addpath(f, 'n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]

    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = ('r', 0, size, 0)
        if size == 0 and f in self._copymap:
            del self._copymap[f]

    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == nullid:
            # not actually merging: treat as possibly-dirty normal
            return self.normallookup(f)
        s = os.lstat(self._join(f))
        self._addpath(f, 'm', s.st_mode,
                      s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
        if f in self._copymap:
            del self._copymap[f]

    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        # Resolve path to its canonical (dirstate/disk) case, caching the
        # answer in _foldmap.  exists, when provided, avoids a redundant
        # lexists() call.
        normed = util.normcase(path)
        folded = self._foldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                if exists is None:
                    exists = os.path.lexists(os.path.join(self._root, path))
                if not exists:
                    # Maybe a path component exists
                    if not ignoremissing and '/' in path:
                        d, f = path.rsplit('/', 1)
                        d = self._normalize(d, isknown, ignoremissing, None)
                        folded = d + "/" + f
                    else:
                        # No path components, preserve original case
                        folded = path
                else:
                    # recursively normalize leading directory components
                    # against dirstate
                    if '/' in normed:
                        d, f = normed.rsplit('/', 1)
                        d = self._normalize(d, isknown, ignoremissing, True)
                        r = self._root + "/" + d
                        folded = d + "/" + util.fspath(f, r)
                    else:
                        folded = util.fspath(normed, self._root)
                    self._foldmap[normed] = folded

        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

    def clear(self):
        """Reset to an empty dirstate with null parents."""
        self._map = {}
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild entries from allfiles (a manifest-like object),
        preserving old entries for files not in changedfiles."""
        changedfiles = changedfiles or allfiles
        oldmap = self._map
        self.clear()
        for f in allfiles:
            if f not in changedfiles:
                self._map[f] = oldmap[f]
            else:
                # size -1 forces a lookup on next status
                if 'x' in allfiles.flags(f):
                    self._map[f] = ('n', 0777, -1, 0)
                else:
                    self._map[f] = ('n', 0666, -1, 0)
        self._pl = (parent, nullid)
        self._dirty = True

    def write(self):
        """Serialize the dirstate to disk if anything changed."""
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        def finish(s):
            st.write(s)
            st.close()
            self._lastnormaltime = 0
            self._dirty = self._dirtypl = False

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime
        finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))

    def _dirignore(self, f):
        # is f, or any of its parent directories, ignored?
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in scmutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def walk(self, match, subrepos, unknown, ignored):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        '''

        def fwarn(f, msg):
            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
            return False

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        ignore = self._ignore
        dirignore = self._dirignore
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif not unknown:
            # if unknown and ignored are False, skip step 2
            ignore = util.always
            dirignore = util.always

        # local aliases to avoid repeated attribute lookups in the loops
        matchfn = match.matchfn
        matchalways = match.always()
        badfn = match.bad
        dmap = self._map
        normpath = util.normpath
        listdir = osutil.listdir
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        work = []
        wadd = work.append

        exact = skipstep3 = False
        if matchfn == match.exact: # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.files() and not match.anypats(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            skipstep3 = False
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; they are
        # handled by the subrepo's own dirstate
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        # step 1: find all explicit files
        for ff in files:
            if normalize:
                nf = normalize(normpath(ff), False, True)
            else:
                nf = normpath(ff)
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    skipstep3 = False
                    if nf in dmap:
                        #file deleted on disk but still in dirstate
                        results[nf] = None
                    match.dir(nf)
                    if not dirignore(nf):
                        wadd(nf)
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError, inst:
                if nf in dmap: # does it exactly match a file?
                    results[nf] = None
                else: # does it match a directory?
                    prefix = nf + "/"
                    for fn in dmap:
                        if fn.startswith(prefix):
                            match.dir(nf)
                            skipstep3 = False
                            break
                    else:
                        badfn(ff, inst.strerror)

        # step 2: visit subdirectories
        while work:
            nd = work.pop()
            skip = None
            if nd == '.':
                nd = ''
            else:
                skip = '.hg'
            try:
                entries = listdir(join(nd), stat=True, skip=skip)
            except OSError, inst:
                if inst.errno in (errno.EACCES, errno.ENOENT):
                    fwarn(nd, inst.strerror)
                    continue
                raise
            for f, kind, st in entries:
                if normalize:
                    nf = normalize(nd and (nd + "/" + f) or f, True, True)
                else:
                    nf = nd and (nd + "/" + f) or f
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            match.dir(nf)
                            wadd(nf)
                        if nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchalways or matchfn(nf):
                                results[nf] = st
                        elif (matchalways or matchfn(nf)) and not ignore(nf):
                            results[nf] = st
                    elif nf in dmap and (matchalways or matchfn(nf)):
                        results[nf] = None

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: report unseen items in the dmap hash
        if not skipstep3 and not exact:
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked the full directory tree above.
                # So if a file is not seen it was either a) not matching matchfn
                # b) ignored, c) missing, or d) under a symlink directory.
                audit_path = scmutil.pathauditor(self._root)

                for nf in iter(visit):
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    if ignore(nf) and audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a tuple of lists (unsure, modified, added,
        removed, deleted, unknown, ignored, clean), where:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          added:
            files that have been explicitly added with hg add
          removed:
            files that have been explicitly removed with hg remove
          deleted:
            files that have been deleted through other means ("missing")
          unknown:
            files not in the dirstate that are not ignored
          ignored:
            files not in the dirstate that are ignored
            (by _dirignore())
          clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # local aliases for speed in the walk loop below
        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        lnkkind = stat.S_IFLNK

        for fn, st in self.walk(match, subrepos, listunknown,
                                listignored).iteritems():
            if fn not in dmap:
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                elif listunknown:
                    uadd(fn)
                continue

            state, mode, size, time = dmap[fn]

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # The "mode & lnkkind != lnkkind or self._checklink"
                # lines are an expansion of "islink => checklink"
                # where islink means "is this a link?" and checklink
                # means "can we check links?".
                mtime = int(st.st_mtime)
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0100 and checkexec))
                    and (mode & lnkkind != lnkkind or checklink)
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif ((time != mtime and time != mtime & _rangemask)
                      and (mode & lnkkind != lnkkind or checklink)):
                    ladd(fn)
                elif mtime == lastnormaltime:
                    # fn may have been changed in the same timeslot without
                    # changing its size. This can happen if we quickly do
                    # multiple commits in a single transaction.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, modified, added, removed, deleted, unknown, ignored,
                clean)
@@ -1,892 +1,898 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, re, stat, glob
13 13
14 14 if os.name == 'nt':
15 15 import scmwindows as scmplatform
16 16 else:
17 17 import scmposix as scmplatform
18 18
19 19 systemrcpath = scmplatform.systemrcpath
20 20 userrcpath = scmplatform.userrcpath
21 21
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded nodes that are secret (and not extinct); they explain
    # why the operation found nothing to transfer.
    secret = []
    for node in excluded or []:
        # discovery should not have included the filtered revision,
        # we have to explicitly exclude it until discovery is cleanup.
        if node not in repo:
            continue
        changectx = repo[node]
        if changectx.phase() >= phases.secret and not changectx.extinct():
            secret.append(node)

    if secret:
        msg = _("no changes found (ignored %d secret changesets)\n")
        ui.status(msg % len(secret))
    else:
        ui.status(_("no changes found\n"))
42 42
def checknewlabel(repo, lbl, kind):
    """Abort if lbl is unacceptable as the name of a new label of type kind.

    Rejects reserved names, names containing control/separator characters,
    and names that parse as integers (ambiguous with revision numbers).
    """
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for ch in (':', '\0', '\n', '\r'):
        if ch in lbl:
            raise util.Abort(_("%r cannot be used in a name") % ch)
    try:
        int(lbl)
    except ValueError:
        return
    raise util.Abort(_("a %s cannot have an integer as its name") % kind)
54 54
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # embedded line terminators would corrupt dirstate and journal records
    for banned in ('\r', '\n'):
        if banned in f:
            raise util.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
59 59
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    problem = util.checkwinfilename(f)
    if not problem:
        return
    problem = "%s: %r" % (problem, f)
    if abort:
        raise util.Abort(problem)
    ui.warn(_("warning: %s\n") % problem)
71 71
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    asbool = util.parsebool(value)
    # Windows cannot represent non-portable names at all, so always abort
    # there; elsewhere honour an explicit 'abort' setting.
    abort = os.name == 'nt' or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    recognized = warn or abort or lowered == 'ignore'
    if asbool is None and not recognized:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
84 84
class casecollisionauditor(object):
    """Warn or abort when a filename differs only by case from a tracked
    one -- such files collide on case-insensitive filesystems."""
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # abort=True raises on collision; otherwise only a warning is issued
        self._abort = abort
        # lower all tracked names in one join/split round-trip; this is much
        # faster than calling encoding.lower() per filename
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        """Audit filename f, recording it for subsequent checks."""
        fl = encoding.lower(f)
        if (fl in self._loweredfiles and f not in self._dirstate and
            f not in self._newfiles):
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
107 107
class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of already-validated paths/directories, keyed by their
        # case-normalized form
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback
        # on case-insensitive filesystems fold the cache keys to one case
        # so 'Foo' and 'foo' share a single entry
        if os.path.lexists(root) and not util.checkcase(root):
            self.normcase = util.normcase
        else:
            self.normcase = lambda x: x

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        path = util.localpath(path)
        normpath = self.normcase(path)
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        # cheap substring test first; only scan components when it hits
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        parts.pop()
        normparts.pop()
        prefixes = []
        # walk parent directories from deepest to shallowest, stopping at
        # the first one already known to be safe
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested "
                                           "repo %r")
                                         % (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

    def check(self, path):
        '''Boolean convenience wrapper: True if path passes the audit.'''
        try:
            self(path)
            return True
        except (OSError, util.Abort):
            return False
201 201
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def read(self, path):
        '''return the full binary content of path; the file handle is
        always closed, even when read() raises'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        '''overwrite path with data (binary mode)'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''append data to path, creating the file if necessary'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def stat(self, path=None):
        return os.stat(self.join(path))
259 259
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expand=False):
        if expand:
            base = os.path.realpath(util.expandpath(base))
        self.base = base
        self._setmustaudit(audit)
        # mode to apply to newly created files; None means leave to umask
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem under base supports symbolic links
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem under base honours the exec bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a freshly created file, when meaningful
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        '''open path relative to base, auditing it first when enabled;
        for write modes, break hardlinks so COW copies are not clobbered'''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''create a symlink at dst pointing to src, falling back to a
        plain file containing src when the filesystem lacks symlinks'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        '''return the absolute form of path under base'''
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
370 370
371 371 opener = vfs
372 372
class auditvfs(object):
    """Mixin that forwards the 'mustaudit' flag to a wrapped vfs."""

    def __init__(self, vfs):
        # keep a reference to the wrapped vfs; subclasses delegate actual
        # file access to it
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff
384 384
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # translate the name first, then hand the open to the wrapped vfs
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path):
        # empty/None paths are passed through unfiltered so the wrapped
        # vfs can resolve its own base directory
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(path))
400 400
401 401 filteropener = filtervfs
402 402
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may reach the wrapped vfs
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
413 413
414 414
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name is textually inside root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). The list
        # `rel' holds the reversed list of components making up the relative
        # file name we want.
        rel = []
        while True:
            try:
                # samefile follows symlinks, so this also catches names
                # that reach root through a link
                s = util.samefile(name, root)
            except OSError:
                s = False
            if s:
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = util.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise util.Abort(_("%s not under root '%s'") % (myname, root))
460 460
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate errors for the starting directory itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return False when an
            # equivalent directory was already seen (symlink cycle guard)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so disable following
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
508 508
def osrcpath():
    '''return default os-specific hgrc search path'''
    # system-wide files are consulted before per-user ones
    rcs = systemrcpath() + userrcpath()
    return [os.path.normpath(p) for p in rcs]
515 515
516 516 _rcpath = None
517 517
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # the computed list is cached in the module-level _rcpath on first use
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes every *.rc file in it
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
541 541
def revsingle(repo, revspec, default='.'):
    """Return the single (last) changectx matched by revspec.

    An empty spec resolves to repo[default]; a spec matching nothing
    aborts."""
    if not revspec:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs[-1]]
550 550
def revpair(repo, revs):
    """Resolve revs to a (node, node-or-None) pair for diff-like commands."""
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if len(resolved) == 0:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    # a single spec without a range separator names exactly one revision
    if len(resolved) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(resolved[0]), None

    return repo.lookup(resolved[0]), repo.lookup(resolved[-1])
566 566
567 567 _revrangesep = ':'
568 568
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # map an empty half of a range spec (e.g. ':5') to defval
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # l accumulates results in order; seen deduplicates across specs and
    # is lazily synced with l to keep the common single-spec case cheap
    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = list(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, list(repo)) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l
626 626
def expandpats(pats):
    '''expand bare glob patterns on platforms without shell expansion'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            # explicitly-typed patterns (e.g. 're:...') are left alone
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(pat)
    return expanded
643 643
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a (matcher, pats) pair for ctx built from pats/opts.

    pats and opts default to empty; the returned matcher reports bad
    files through the repo ui instead of raising.'''
    # avoid mutable default arguments ([]/{} are shared across calls):
    # normalize None (and the historical ("",) sentinel) here instead
    if pats is None or pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        # report unmatched/bad files instead of aborting the operation
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats
656 656
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return only the matcher from matchandpats().'''
    # avoid mutable default arguments; pass concrete empty values along
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
659 659
def matchall(repo):
    '''Return a matcher selecting every file in the repository.'''
    return matchmod.always(repo.root, repo.getcwd())
662 662
def matchfiles(repo, files):
    '''Return a matcher selecting exactly the given list of files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
665 665
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Schedule unknown files for addition and missing files for removal,
    recording sufficiently similar add/remove pairs as renames.

    Returns 1 if any pattern matched nothing, 0 otherwise.'''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    rejected = []
    # collect patterns that match nothing; they decide the return code
    m.bad = lambda x, y: rejected.append(x)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        # for finding renames
        elif dstate == 'r':
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        # apply the collected changes under the working-directory lock
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()

    for f in rejected:
        if f in m.files():
            return 1
    return 0
730 730
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc:
        # copying back a copy: just make sure dst is tracked normally again
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # the source was only added in the working directory, so there is
        # no committed revision to record copy data against
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
749 749
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missing = []
    for feature in requirements:
        if feature in supported:
            continue
        # a requirement must look like an identifier; anything else means
        # the requires file itself is damaged
        if not feature or not feature[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missing.append(feature)
    if missing:
        missing.sort()
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missing))
    return requirements
766 766
class filecacheentry(object):
    """Stat-based cache entry for one file path.

    Remembers stat info for path so changed() can later tell whether the
    file was replaced or modified since the entry was created."""
    def __init__(self, path, stat=True):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecacheentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-record the current stat info (no-op when not cacheable)
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # return util.cachestat for path, or None if the file is missing
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
821 821
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path):
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            ce = filecacheentry(self.join(obj, self.path), False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
893
def finddirs(path):
    '''Yield every ancestor directory of a '/'-separated path, from the
    deepest ("a/b" for "a/b/c") up to the shallowest ("a").'''
    end = len(path)
    while True:
        end = path.rfind('/', 0, end)
        if end == -1:
            return
        yield path[:end]
General Comments 0
You need to be logged in to leave comments. Login now