scmutil.addremove: pull repo.dirstate fetch out of the loop...
Siddharth Agarwal
r18861:ec91b66e default
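The change itself is small: in the single modified hunk inside addremove, further down in this diff, the repeated repo.dirstate fetch is hoisted out of the per-file loop into a local dirstate variable, and the loop body indexes that local instead. On the repo object, dirstate is served through a filecache-style data descriptor (the decorator defined near the end of this file), so each repo.dirstate access inside the loop re-enters the descriptor's __get__ rather than being a cheap local read, once per walked file. A minimal, hypothetical sketch of the pattern, using stand-in names rather than real Mercurial APIs:

class Repo(object):
    """Illustrative stand-in, not Mercurial's localrepository."""
    def __init__(self):
        self._dirstate = {'a.txt': 'n', 'b.txt': '?', 'c.txt': 'r'}

    @property
    def dirstate(self):
        # stand-in for the descriptor-backed dirstate attribute
        return self._dirstate

def states_before(repo, files):
    # the attribute is re-fetched on every iteration
    return [repo.dirstate.get(f, '?') for f in files]

def states_after(repo, files):
    dirstate = repo.dirstate  # fetched once, as this changeset does
    return [dirstate.get(f, '?') for f in files]

addremove walks every file in the working directory once, so caching the lookup turns N descriptor calls into one descriptor call plus N local reads; behaviour is unchanged because the same dirstate object backs the whole walk.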
@@ -1,886 +1,887
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, re, stat, glob
13 13
14 14 if os.name == 'nt':
15 15 import scmwindows as scmplatform
16 16 else:
17 17 import scmposix as scmplatform
18 18
19 19 systemrcpath = scmplatform.systemrcpath
20 20 userrcpath = scmplatform.userrcpath
21 21
22 22 def nochangesfound(ui, repo, excluded=None):
23 23 '''Report no changes for push/pull, excluded is None or a list of
24 24 nodes excluded from the push/pull.
25 25 '''
26 26 secretlist = []
27 27 if excluded:
28 28 for n in excluded:
29 29 if n not in repo:
30 30 # discovery should not have included the filtered revision,
31 31                 # we have to explicitly exclude it until discovery is cleaned up.
32 32 continue
33 33 ctx = repo[n]
34 34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 35 secretlist.append(n)
36 36
37 37 if secretlist:
38 38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 39 % len(secretlist))
40 40 else:
41 41 ui.status(_("no changes found\n"))
42 42
43 43 def checknewlabel(repo, lbl, kind):
44 44 if lbl in ['tip', '.', 'null']:
45 45 raise util.Abort(_("the name '%s' is reserved") % lbl)
46 46 for c in (':', '\0', '\n', '\r'):
47 47 if c in lbl:
48 48 raise util.Abort(_("%r cannot be used in a name") % c)
49 49 try:
50 50 int(lbl)
51 51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
52 52 except ValueError:
53 53 pass
54 54
55 55 def checkfilename(f):
56 56 '''Check that the filename f is an acceptable filename for a tracked file'''
57 57 if '\r' in f or '\n' in f:
58 58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
59 59
60 60 def checkportable(ui, f):
61 61 '''Check if filename f is portable and warn or abort depending on config'''
62 62 checkfilename(f)
63 63 abort, warn = checkportabilityalert(ui)
64 64 if abort or warn:
65 65 msg = util.checkwinfilename(f)
66 66 if msg:
67 67 msg = "%s: %r" % (msg, f)
68 68 if abort:
69 69 raise util.Abort(msg)
70 70 ui.warn(_("warning: %s\n") % msg)
71 71
72 72 def checkportabilityalert(ui):
73 73 '''check if the user's config requests nothing, a warning, or abort for
74 74 non-portable filenames'''
75 75 val = ui.config('ui', 'portablefilenames', 'warn')
76 76 lval = val.lower()
77 77 bval = util.parsebool(val)
78 78 abort = os.name == 'nt' or lval == 'abort'
79 79 warn = bval or lval == 'warn'
80 80 if bval is None and not (warn or abort or lval == 'ignore'):
81 81 raise error.ConfigError(
82 82 _("ui.portablefilenames value is invalid ('%s')") % val)
83 83 return abort, warn
84 84
85 85 class casecollisionauditor(object):
86 86 def __init__(self, ui, abort, dirstate):
87 87 self._ui = ui
88 88 self._abort = abort
89 89 allfiles = '\0'.join(dirstate._map)
90 90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
91 91 self._dirstate = dirstate
92 92 # The purpose of _newfiles is so that we don't complain about
93 93 # case collisions if someone were to call this object with the
94 94 # same filename twice.
95 95 self._newfiles = set()
96 96
97 97 def __call__(self, f):
98 98 fl = encoding.lower(f)
99 99 if (fl in self._loweredfiles and f not in self._dirstate and
100 100 f not in self._newfiles):
101 101 msg = _('possible case-folding collision for %s') % f
102 102 if self._abort:
103 103 raise util.Abort(msg)
104 104 self._ui.warn(_("warning: %s\n") % msg)
105 105 self._loweredfiles.add(fl)
106 106 self._newfiles.add(f)
107 107
108 108 class pathauditor(object):
109 109 '''ensure that a filesystem path contains no banned components.
110 110 the following properties of a path are checked:
111 111
112 112 - ends with a directory separator
113 113 - under top-level .hg
114 114 - starts at the root of a windows drive
115 115 - contains ".."
116 116 - traverses a symlink (e.g. a/symlink_here/b)
117 117 - inside a nested repository (a callback can be used to approve
118 118 some nested repositories, e.g., subrepositories)
119 119 '''
120 120
121 121 def __init__(self, root, callback=None):
122 122 self.audited = set()
123 123 self.auditeddir = set()
124 124 self.root = root
125 125 self.callback = callback
126 126 if os.path.lexists(root) and not util.checkcase(root):
127 127 self.normcase = util.normcase
128 128 else:
129 129 self.normcase = lambda x: x
130 130
131 131 def __call__(self, path):
132 132 '''Check the relative path.
133 133 path may contain a pattern (e.g. foodir/**.txt)'''
134 134
135 135 path = util.localpath(path)
136 136 normpath = self.normcase(path)
137 137 if normpath in self.audited:
138 138 return
139 139 # AIX ignores "/" at end of path, others raise EISDIR.
140 140 if util.endswithsep(path):
141 141 raise util.Abort(_("path ends in directory separator: %s") % path)
142 142 parts = util.splitpath(path)
143 143 if (os.path.splitdrive(path)[0]
144 144 or parts[0].lower() in ('.hg', '.hg.', '')
145 145 or os.pardir in parts):
146 146 raise util.Abort(_("path contains illegal component: %s") % path)
147 147 if '.hg' in path.lower():
148 148 lparts = [p.lower() for p in parts]
149 149 for p in '.hg', '.hg.':
150 150 if p in lparts[1:]:
151 151 pos = lparts.index(p)
152 152 base = os.path.join(*parts[:pos])
153 153 raise util.Abort(_("path '%s' is inside nested repo %r")
154 154 % (path, base))
155 155
156 156 normparts = util.splitpath(normpath)
157 157 assert len(parts) == len(normparts)
158 158
159 159 parts.pop()
160 160 normparts.pop()
161 161 prefixes = []
162 162 while parts:
163 163 prefix = os.sep.join(parts)
164 164 normprefix = os.sep.join(normparts)
165 165 if normprefix in self.auditeddir:
166 166 break
167 167 curpath = os.path.join(self.root, prefix)
168 168 try:
169 169 st = os.lstat(curpath)
170 170 except OSError, err:
171 171 # EINVAL can be raised as invalid path syntax under win32.
172 172                 # They must be ignored because patterns can be checked too.
173 173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
174 174 raise
175 175 else:
176 176 if stat.S_ISLNK(st.st_mode):
177 177 raise util.Abort(
178 178 _('path %r traverses symbolic link %r')
179 179 % (path, prefix))
180 180 elif (stat.S_ISDIR(st.st_mode) and
181 181 os.path.isdir(os.path.join(curpath, '.hg'))):
182 182 if not self.callback or not self.callback(curpath):
183 183 raise util.Abort(_("path '%s' is inside nested "
184 184 "repo %r")
185 185 % (path, prefix))
186 186 prefixes.append(normprefix)
187 187 parts.pop()
188 188 normparts.pop()
189 189
190 190 self.audited.add(normpath)
191 191 # only add prefixes to the cache after checking everything: we don't
192 192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
193 193 self.auditeddir.update(prefixes)
194 194
195 195 def check(self, path):
196 196 try:
197 197 self(path)
198 198 return True
199 199 except (OSError, util.Abort):
200 200 return False
201 201
202 202 class abstractvfs(object):
203 203 """Abstract base class; cannot be instantiated"""
204 204
205 205 def __init__(self, *args, **kwargs):
206 206 '''Prevent instantiation; don't call this from subclasses.'''
207 207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
208 208
209 209 def tryread(self, path):
210 210 '''gracefully return an empty string for missing files'''
211 211 try:
212 212 return self.read(path)
213 213 except IOError, inst:
214 214 if inst.errno != errno.ENOENT:
215 215 raise
216 216 return ""
217 217
218 218 def read(self, path):
219 219 fp = self(path, 'rb')
220 220 try:
221 221 return fp.read()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 232 def append(self, path, data):
233 233 fp = self(path, 'ab')
234 234 try:
235 235 return fp.write(data)
236 236 finally:
237 237 fp.close()
238 238
239 239 def exists(self, path=None):
240 240 return os.path.exists(self.join(path))
241 241
242 242 def isdir(self, path=None):
243 243 return os.path.isdir(self.join(path))
244 244
245 245 def makedir(self, path=None, notindexed=True):
246 246 return util.makedir(self.join(path), notindexed)
247 247
248 248 def makedirs(self, path=None, mode=None):
249 249 return util.makedirs(self.join(path), mode)
250 250
251 251 def mkdir(self, path=None):
252 252 return os.mkdir(self.join(path))
253 253
254 254 def readdir(self, path=None, stat=None, skip=None):
255 255 return osutil.listdir(self.join(path), stat, skip)
256 256
257 257 def stat(self, path=None):
258 258 return os.stat(self.join(path))
259 259
260 260 class vfs(abstractvfs):
261 261 '''Operate files relative to a base directory
262 262
263 263 This class is used to hide the details of COW semantics and
264 264 remote file access from higher level code.
265 265 '''
266 266 def __init__(self, base, audit=True, expand=False):
267 267 if expand:
268 268 base = os.path.realpath(util.expandpath(base))
269 269 self.base = base
270 270 self._setmustaudit(audit)
271 271 self.createmode = None
272 272 self._trustnlink = None
273 273
274 274 def _getmustaudit(self):
275 275 return self._audit
276 276
277 277 def _setmustaudit(self, onoff):
278 278 self._audit = onoff
279 279 if onoff:
280 280 self.audit = pathauditor(self.base)
281 281 else:
282 282 self.audit = util.always
283 283
284 284 mustaudit = property(_getmustaudit, _setmustaudit)
285 285
286 286 @util.propertycache
287 287 def _cansymlink(self):
288 288 return util.checklink(self.base)
289 289
290 290 @util.propertycache
291 291 def _chmod(self):
292 292 return util.checkexec(self.base)
293 293
294 294 def _fixfilemode(self, name):
295 295 if self.createmode is None or not self._chmod:
296 296 return
297 297 os.chmod(name, self.createmode & 0666)
298 298
299 299 def __call__(self, path, mode="r", text=False, atomictemp=False):
300 300 if self._audit:
301 301 r = util.checkosfilename(path)
302 302 if r:
303 303 raise util.Abort("%s: %r" % (r, path))
304 304 self.audit(path)
305 305 f = self.join(path)
306 306
307 307 if not text and "b" not in mode:
308 308 mode += "b" # for that other OS
309 309
310 310 nlink = -1
311 311 if mode not in ('r', 'rb'):
312 312 dirname, basename = util.split(f)
313 313 # If basename is empty, then the path is malformed because it points
314 314 # to a directory. Let the posixfile() call below raise IOError.
315 315 if basename:
316 316 if atomictemp:
317 317 util.ensuredirs(dirname, self.createmode)
318 318 return util.atomictempfile(f, mode, self.createmode)
319 319 try:
320 320 if 'w' in mode:
321 321 util.unlink(f)
322 322 nlink = 0
323 323 else:
324 324 # nlinks() may behave differently for files on Windows
325 325 # shares if the file is open.
326 326 fd = util.posixfile(f)
327 327 nlink = util.nlinks(f)
328 328 if nlink < 1:
329 329 nlink = 2 # force mktempcopy (issue1922)
330 330 fd.close()
331 331 except (OSError, IOError), e:
332 332 if e.errno != errno.ENOENT:
333 333 raise
334 334 nlink = 0
335 335 util.ensuredirs(dirname, self.createmode)
336 336 if nlink > 0:
337 337 if self._trustnlink is None:
338 338 self._trustnlink = nlink > 1 or util.checknlink(f)
339 339 if nlink > 1 or not self._trustnlink:
340 340 util.rename(util.mktempcopy(f), f)
341 341 fp = util.posixfile(f, mode)
342 342 if nlink == 0:
343 343 self._fixfilemode(f)
344 344 return fp
345 345
346 346 def symlink(self, src, dst):
347 347 self.audit(dst)
348 348 linkname = self.join(dst)
349 349 try:
350 350 os.unlink(linkname)
351 351 except OSError:
352 352 pass
353 353
354 354 util.ensuredirs(os.path.dirname(linkname), self.createmode)
355 355
356 356 if self._cansymlink:
357 357 try:
358 358 os.symlink(src, linkname)
359 359 except OSError, err:
360 360 raise OSError(err.errno, _('could not symlink to %r: %s') %
361 361 (src, err.strerror), linkname)
362 362 else:
363 363 self.write(dst, src)
364 364
365 365 def join(self, path):
366 366 if path:
367 367 return os.path.join(self.base, path)
368 368 else:
369 369 return self.base
370 370
371 371 opener = vfs
372 372
373 373 class auditvfs(object):
374 374 def __init__(self, vfs):
375 375 self.vfs = vfs
376 376
377 377 def _getmustaudit(self):
378 378 return self.vfs.mustaudit
379 379
380 380 def _setmustaudit(self, onoff):
381 381 self.vfs.mustaudit = onoff
382 382
383 383 mustaudit = property(_getmustaudit, _setmustaudit)
384 384
385 385 class filtervfs(abstractvfs, auditvfs):
386 386 '''Wrapper vfs for filtering filenames with a function.'''
387 387
388 388 def __init__(self, vfs, filter):
389 389 auditvfs.__init__(self, vfs)
390 390 self._filter = filter
391 391
392 392 def __call__(self, path, *args, **kwargs):
393 393 return self.vfs(self._filter(path), *args, **kwargs)
394 394
395 395 def join(self, path):
396 396 if path:
397 397 return self.vfs.join(self._filter(path))
398 398 else:
399 399 return self.vfs.join(path)
400 400
401 401 filteropener = filtervfs
402 402
403 403 class readonlyvfs(abstractvfs, auditvfs):
404 404 '''Wrapper vfs preventing any writing.'''
405 405
406 406 def __init__(self, vfs):
407 407 auditvfs.__init__(self, vfs)
408 408
409 409 def __call__(self, path, mode='r', *args, **kw):
410 410 if mode not in ('r', 'rb'):
411 411 raise util.Abort('this vfs is read only')
412 412 return self.vfs(path, mode, *args, **kw)
413 413
414 414
415 415 def canonpath(root, cwd, myname, auditor=None):
416 416 '''return the canonical path of myname, given cwd and root'''
417 417 if util.endswithsep(root):
418 418 rootsep = root
419 419 else:
420 420 rootsep = root + os.sep
421 421 name = myname
422 422 if not os.path.isabs(name):
423 423 name = os.path.join(root, cwd, name)
424 424 name = os.path.normpath(name)
425 425 if auditor is None:
426 426 auditor = pathauditor(root)
427 427 if name != rootsep and name.startswith(rootsep):
428 428 name = name[len(rootsep):]
429 429 auditor(name)
430 430 return util.pconvert(name)
431 431 elif name == root:
432 432 return ''
433 433 else:
434 434 # Determine whether `name' is in the hierarchy at or beneath `root',
435 435 # by iterating name=dirname(name) until that causes no change (can't
436 436 # check name == '/', because that doesn't work on windows). The list
437 437 # `rel' holds the reversed list of components making up the relative
438 438 # file name we want.
439 439 rel = []
440 440 while True:
441 441 try:
442 442 s = util.samefile(name, root)
443 443 except OSError:
444 444 s = False
445 445 if s:
446 446 if not rel:
447 447 # name was actually the same as root (maybe a symlink)
448 448 return ''
449 449 rel.reverse()
450 450 name = os.path.join(*rel)
451 451 auditor(name)
452 452 return util.pconvert(name)
453 453 dirname, basename = util.split(name)
454 454 rel.append(basename)
455 455 if dirname == name:
456 456 break
457 457 name = dirname
458 458
459 459 raise util.Abort(_("%s not under root '%s'") % (myname, root))
460 460
461 461 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
462 462 '''yield every hg repository under path, always recursively.
463 463 The recurse flag will only control recursion into repo working dirs'''
464 464 def errhandler(err):
465 465 if err.filename == path:
466 466 raise err
467 467 samestat = getattr(os.path, 'samestat', None)
468 468 if followsym and samestat is not None:
469 469 def adddir(dirlst, dirname):
470 470 match = False
471 471 dirstat = os.stat(dirname)
472 472 for lstdirstat in dirlst:
473 473 if samestat(dirstat, lstdirstat):
474 474 match = True
475 475 break
476 476 if not match:
477 477 dirlst.append(dirstat)
478 478 return not match
479 479 else:
480 480 followsym = False
481 481
482 482 if (seen_dirs is None) and followsym:
483 483 seen_dirs = []
484 484 adddir(seen_dirs, path)
485 485 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
486 486 dirs.sort()
487 487 if '.hg' in dirs:
488 488 yield root # found a repository
489 489 qroot = os.path.join(root, '.hg', 'patches')
490 490 if os.path.isdir(os.path.join(qroot, '.hg')):
491 491 yield qroot # we have a patch queue repo here
492 492 if recurse:
493 493 # avoid recursing inside the .hg directory
494 494 dirs.remove('.hg')
495 495 else:
496 496 dirs[:] = [] # don't descend further
497 497 elif followsym:
498 498 newdirs = []
499 499 for d in dirs:
500 500 fname = os.path.join(root, d)
501 501 if adddir(seen_dirs, fname):
502 502 if os.path.islink(fname):
503 503 for hgname in walkrepos(fname, True, seen_dirs):
504 504 yield hgname
505 505 else:
506 506 newdirs.append(d)
507 507 dirs[:] = newdirs
508 508
509 509 def osrcpath():
510 510 '''return default os-specific hgrc search path'''
511 511 path = systemrcpath()
512 512 path.extend(userrcpath())
513 513 path = [os.path.normpath(f) for f in path]
514 514 return path
515 515
516 516 _rcpath = None
517 517
518 518 def rcpath():
519 519 '''return hgrc search path. if env var HGRCPATH is set, use it.
520 520 for each item in path, if directory, use files ending in .rc,
521 521 else use item.
522 522 make HGRCPATH empty to only look in .hg/hgrc of current repo.
523 523 if no HGRCPATH, use default os-specific path.'''
524 524 global _rcpath
525 525 if _rcpath is None:
526 526 if 'HGRCPATH' in os.environ:
527 527 _rcpath = []
528 528 for p in os.environ['HGRCPATH'].split(os.pathsep):
529 529 if not p:
530 530 continue
531 531 p = util.expandpath(p)
532 532 if os.path.isdir(p):
533 533 for f, kind in osutil.listdir(p):
534 534 if f.endswith('.rc'):
535 535 _rcpath.append(os.path.join(p, f))
536 536 else:
537 537 _rcpath.append(p)
538 538 else:
539 539 _rcpath = osrcpath()
540 540 return _rcpath
541 541
542 542 def revsingle(repo, revspec, default='.'):
543 543 if not revspec:
544 544 return repo[default]
545 545
546 546 l = revrange(repo, [revspec])
547 547 if len(l) < 1:
548 548 raise util.Abort(_('empty revision set'))
549 549 return repo[l[-1]]
550 550
551 551 def revpair(repo, revs):
552 552 if not revs:
553 553 return repo.dirstate.p1(), None
554 554
555 555 l = revrange(repo, revs)
556 556
557 557 if len(l) == 0:
558 558 if revs:
559 559 raise util.Abort(_('empty revision range'))
560 560 return repo.dirstate.p1(), None
561 561
562 562 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
563 563 return repo.lookup(l[0]), None
564 564
565 565 return repo.lookup(l[0]), repo.lookup(l[-1])
566 566
567 567 _revrangesep = ':'
568 568
569 569 def revrange(repo, revs):
570 570 """Yield revision as strings from a list of revision specifications."""
571 571
572 572 def revfix(repo, val, defval):
573 573 if not val and val != 0 and defval is not None:
574 574 return defval
575 575 return repo[val].rev()
576 576
577 577 seen, l = set(), []
578 578 for spec in revs:
579 579 if l and not seen:
580 580 seen = set(l)
581 581 # attempt to parse old-style ranges first to deal with
582 582 # things like old-tag which contain query metacharacters
583 583 try:
584 584 if isinstance(spec, int):
585 585 seen.add(spec)
586 586 l.append(spec)
587 587 continue
588 588
589 589 if _revrangesep in spec:
590 590 start, end = spec.split(_revrangesep, 1)
591 591 start = revfix(repo, start, 0)
592 592 end = revfix(repo, end, len(repo) - 1)
593 593 if end == nullrev and start <= 0:
594 594 start = nullrev
595 595 rangeiter = repo.changelog.revs(start, end)
596 596 if not seen and not l:
597 597 # by far the most common case: revs = ["-1:0"]
598 598 l = list(rangeiter)
599 599 # defer syncing seen until next iteration
600 600 continue
601 601 newrevs = set(rangeiter)
602 602 if seen:
603 603 newrevs.difference_update(seen)
604 604 seen.update(newrevs)
605 605 else:
606 606 seen = newrevs
607 607 l.extend(sorted(newrevs, reverse=start > end))
608 608 continue
609 609 elif spec and spec in repo: # single unquoted rev
610 610 rev = revfix(repo, spec, None)
611 611 if rev in seen:
612 612 continue
613 613 seen.add(rev)
614 614 l.append(rev)
615 615 continue
616 616 except error.RepoLookupError:
617 617 pass
618 618
619 619 # fall through to new-style queries if old-style fails
620 620 m = revset.match(repo.ui, spec)
621 621 dl = [r for r in m(repo, list(repo)) if r not in seen]
622 622 l.extend(dl)
623 623 seen.update(dl)
624 624
625 625 return l
626 626
627 627 def expandpats(pats):
628 628 if not util.expandglobs:
629 629 return list(pats)
630 630 ret = []
631 631 for p in pats:
632 632 kind, name = matchmod._patsplit(p, None)
633 633 if kind is None:
634 634 try:
635 635 globbed = glob.glob(name)
636 636 except re.error:
637 637 globbed = [name]
638 638 if globbed:
639 639 ret.extend(globbed)
640 640 continue
641 641 ret.append(p)
642 642 return ret
643 643
644 644 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
645 645 if pats == ("",):
646 646 pats = []
647 647 if not globbed and default == 'relpath':
648 648 pats = expandpats(pats or [])
649 649
650 650 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
651 651 default)
652 652 def badfn(f, msg):
653 653 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
654 654 m.bad = badfn
655 655 return m, pats
656 656
657 657 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
658 658 return matchandpats(ctx, pats, opts, globbed, default)[0]
659 659
660 660 def matchall(repo):
661 661 return matchmod.always(repo.root, repo.getcwd())
662 662
663 663 def matchfiles(repo, files):
664 664 return matchmod.exact(repo.root, repo.getcwd(), files)
665 665
666 666 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
667 667 if dry_run is None:
668 668 dry_run = opts.get('dry_run')
669 669 if similarity is None:
670 670 similarity = float(opts.get('similarity') or 0)
671 671 # we'd use status here, except handling of symlinks and ignore is tricky
672 672 added, unknown, deleted, removed = [], [], [], []
673 673 audit_path = pathauditor(repo.root)
674 674 m = match(repo[None], pats, opts)
675 675 rejected = []
676 676 m.bad = lambda x, y: rejected.append(x)
677 677
678 678 ctx = repo[None]
679 walkresults = repo.dirstate.walk(m, sorted(ctx.substate), True, False)
679 dirstate = repo.dirstate
680 walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
680 681 for abs in sorted(walkresults):
681 682 st = walkresults[abs]
682 dstate = repo.dirstate[abs]
683 dstate = dirstate[abs]
683 684 if dstate == '?' and audit_path.check(abs):
684 685 unknown.append(abs)
685 686 if repo.ui.verbose or not m.exact(abs):
686 687 rel = m.rel(abs)
687 688 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
688 689 elif (dstate != 'r' and (not st or
689 690 (stat.S_ISDIR(st.st_mode) and not stat.S_ISLNK(st.st_mode)))):
690 691 deleted.append(abs)
691 692 if repo.ui.verbose or not m.exact(abs):
692 693 rel = m.rel(abs)
693 694 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
694 695 # for finding renames
695 696 elif dstate == 'r':
696 697 removed.append(abs)
697 698 elif dstate == 'a':
698 699 added.append(abs)
699 700 copies = {}
700 701 if similarity > 0:
701 702 for old, new, score in similar.findrenames(repo,
702 703 added + unknown, removed + deleted, similarity):
703 704 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
704 705 repo.ui.status(_('recording removal of %s as rename to %s '
705 706 '(%d%% similar)\n') %
706 707 (m.rel(old), m.rel(new), score * 100))
707 708 copies[new] = old
708 709
709 710 if not dry_run:
710 711 wctx = repo[None]
711 712 wlock = repo.wlock()
712 713 try:
713 714 wctx.forget(deleted)
714 715 wctx.add(unknown)
715 716 for new, old in copies.iteritems():
716 717 wctx.copy(old, new)
717 718 finally:
718 719 wlock.release()
719 720
720 721 for f in rejected:
721 722 if f in m.files():
722 723 return 1
723 724 return 0
724 725
725 726 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
726 727 """Update the dirstate to reflect the intent of copying src to dst. For
727 728 different reasons it might not end with dst being marked as copied from src.
728 729 """
729 730 origsrc = repo.dirstate.copied(src) or src
730 731 if dst == origsrc: # copying back a copy?
731 732 if repo.dirstate[dst] not in 'mn' and not dryrun:
732 733 repo.dirstate.normallookup(dst)
733 734 else:
734 735 if repo.dirstate[origsrc] == 'a' and origsrc == src:
735 736 if not ui.quiet:
736 737 ui.warn(_("%s has not been committed yet, so no copy "
737 738 "data will be stored for %s.\n")
738 739 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
739 740 if repo.dirstate[dst] in '?r' and not dryrun:
740 741 wctx.add([dst])
741 742 elif not dryrun:
742 743 wctx.copy(origsrc, dst)
743 744
744 745 def readrequires(opener, supported):
745 746 '''Reads and parses .hg/requires and checks if all entries found
746 747 are in the list of supported features.'''
747 748 requirements = set(opener.read("requires").splitlines())
748 749 missings = []
749 750 for r in requirements:
750 751 if r not in supported:
751 752 if not r or not r[0].isalnum():
752 753 raise error.RequirementError(_(".hg/requires file is corrupt"))
753 754 missings.append(r)
754 755 missings.sort()
755 756 if missings:
756 757 raise error.RequirementError(
757 758 _("unknown repository format: requires features '%s' (upgrade "
758 759 "Mercurial)") % "', '".join(missings))
759 760 return requirements
760 761
761 762 class filecacheentry(object):
762 763 def __init__(self, path, stat=True):
763 764 self.path = path
764 765 self.cachestat = None
765 766 self._cacheable = None
766 767
767 768 if stat:
768 769 self.cachestat = filecacheentry.stat(self.path)
769 770
770 771 if self.cachestat:
771 772 self._cacheable = self.cachestat.cacheable()
772 773 else:
773 774 # None means we don't know yet
774 775 self._cacheable = None
775 776
776 777 def refresh(self):
777 778 if self.cacheable():
778 779 self.cachestat = filecacheentry.stat(self.path)
779 780
780 781 def cacheable(self):
781 782 if self._cacheable is not None:
782 783 return self._cacheable
783 784
784 785 # we don't know yet, assume it is for now
785 786 return True
786 787
787 788 def changed(self):
788 789 # no point in going further if we can't cache it
789 790 if not self.cacheable():
790 791 return True
791 792
792 793 newstat = filecacheentry.stat(self.path)
793 794
794 795 # we may not know if it's cacheable yet, check again now
795 796 if newstat and self._cacheable is None:
796 797 self._cacheable = newstat.cacheable()
797 798
798 799 # check again
799 800 if not self._cacheable:
800 801 return True
801 802
802 803 if self.cachestat != newstat:
803 804 self.cachestat = newstat
804 805 return True
805 806 else:
806 807 return False
807 808
808 809 @staticmethod
809 810 def stat(path):
810 811 try:
811 812 return util.cachestat(path)
812 813 except OSError, e:
813 814 if e.errno != errno.ENOENT:
814 815 raise
815 816
816 817 class filecache(object):
817 818 '''A property like decorator that tracks a file under .hg/ for updates.
818 819
819 820 Records stat info when called in _filecache.
820 821
821 822 On subsequent calls, compares old stat info with new info, and recreates
822 823 the object when needed, updating the new stat info in _filecache.
823 824
824 825     Mercurial either atomically renames or appends to files under .hg,
825 826 so to ensure the cache is reliable we need the filesystem to be able
826 827     to tell us if a file has been replaced. If it can't, we fall back to
827 828 recreating the object on every call (essentially the same behaviour as
828 829 propertycache).'''
829 830 def __init__(self, path):
830 831 self.path = path
831 832
832 833 def join(self, obj, fname):
833 834 """Used to compute the runtime path of the cached file.
834 835
835 836 Users should subclass filecache and provide their own version of this
836 837 function to call the appropriate join function on 'obj' (an instance
837 838         of the class whose member function was decorated).
838 839 """
839 840 return obj.join(fname)
840 841
841 842 def __call__(self, func):
842 843 self.func = func
843 844 self.name = func.__name__
844 845 return self
845 846
846 847 def __get__(self, obj, type=None):
847 848 # do we need to check if the file changed?
848 849 if self.name in obj.__dict__:
849 850 assert self.name in obj._filecache, self.name
850 851 return obj.__dict__[self.name]
851 852
852 853 entry = obj._filecache.get(self.name)
853 854
854 855 if entry:
855 856 if entry.changed():
856 857 entry.obj = self.func(obj)
857 858 else:
858 859 path = self.join(obj, self.path)
859 860
860 861 # We stat -before- creating the object so our cache doesn't lie if
861 862             # a writer modified the file between the time we read and stat it
862 863 entry = filecacheentry(path)
863 864 entry.obj = self.func(obj)
864 865
865 866 obj._filecache[self.name] = entry
866 867
867 868 obj.__dict__[self.name] = entry.obj
868 869 return entry.obj
869 870
870 871 def __set__(self, obj, value):
871 872 if self.name not in obj._filecache:
872 873 # we add an entry for the missing value because X in __dict__
873 874 # implies X in _filecache
874 875 ce = filecacheentry(self.join(obj, self.path), False)
875 876 obj._filecache[self.name] = ce
876 877 else:
877 878 ce = obj._filecache[self.name]
878 879
879 880 ce.obj = value # update cached copy
880 881 obj.__dict__[self.name] = value # update copy returned by obj.x
881 882
882 883 def __delete__(self, obj):
883 884 try:
884 885 del obj.__dict__[self.name]
885 886 except KeyError:
886 887 raise AttributeError(self.name)