##// END OF EJS Templates
addremove: replace match.bad() monkey patching with match.badmatch()...
Matt Harbison -
r25434:5984dd42 default
parent child Browse files
Show More
@@ -1,1162 +1,1161 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat, inspect
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Immutable 7-tuple of per-status file lists.

    The 'deleted', 'unknown' and 'ignored' slots are only meaningful for
    the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        fmt = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
               'unknown=%r, ignored=%r, clean=%r>')
        return fmt % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, preferring
    # ctx1.  Subpaths only in ctx2 matter when .hgsub has been modified
    # (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set(s for s in ctx2.substate if s not in ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2 so
    # that 'sub.{status|diff}(rev2)' has something meaningful to compare
    # against instead of comparing the ctx2 subrepo with itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
121 121
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not usable as a bookmark/branch/tag name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # a purely numeric name would be ambiguous with a revision number
        raise util.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for c in ('\r', '\n'):
        if c in f:
            raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                             % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts; elsewhere only on explicit 'abort'
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    '''Warn or abort when a newly added filename collides case-insensitively
    with a tracked or previously-added file.

    Call the instance with each filename being added.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort  # True: raise on collision; False: only warn
        # Lower-case every tracked name with a single encoding.lower()
        # call by joining/splitting on NUL, rather than once per file.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # f: repo-relative filename being added
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a name already in the dirstate is not a collision with itself
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189 189
def develwarn(tui, msg):
    """issue a developer warning message"""
    msg = 'devel-warn: ' + msg
    if not tui.tracebackflag:
        # report the grandcaller's location (frame index 2), matching the
        # depth passed to debugstacktrace below
        frame = inspect.currentframe()
        outer = inspect.getouterframes(frame, 2)
        tui.write_err('%s at: %s:%s (%s)\n' % ((msg,) + outer[2][1:4]))
    else:
        util.debugstacktrace(msg, 2)
199 199
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.  Returns None when nothing is filtered at or below
    ``maxrev``.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
223 223
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Subclasses must provide __call__ (open a file relative to the vfs
    root) and join(); everything here is expressed in terms of those two.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind 'open' to __call__ on first use so subsequent calls skip
        # this shim entirely
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        '''Return the full binary content of ``path``.'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        '''Write ``data`` to ``path``, truncating any existing content.'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The following helpers mirror os/os.path/util functions, with paths
    # resolved relative to the vfs root via self.join().

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under ``dir`` and return (fd, name), with
        ``name`` reported relative to ``dir`` rather than as the absolute
        path tempfile produced.'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry plain unlink failures
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
423 423
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory all relative paths are resolved against
        # audit: when True, every path is vetted by pathutil.pathauditor
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None  # permission bits applied to new files
        self._trustnlink = None  # lazily learned: are nlink counts usable?

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # does the filesystem under base support symlinks?
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # does the filesystem under base honour the exec bit?
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a newly created file, if configured and
        # the filesystem supports permission bits
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink: -1 unknown, 0 file will be newly created, >0 existing
        # file's hardlink count
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        # truncating write: the old file is gone anyway
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    # the file may be hardlinked: unless the link count can
                    # be trusted, replace it with a private copy first so
                    # writes don't leak through other links
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''Create a symlink at ``dst`` pointing to ``src``; falls back to a
        regular file containing ``src`` when symlinks are unsupported.'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        # empty/None path means the vfs root itself
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

opener = vfs  # alias
545 545
class auditvfs(object):
    '''Base class for wrapper vfs's; forwards the ``mustaudit`` flag to the
    wrapped vfs.'''
    def __init__(self, vfs):
        # vfs: the wrapped vfs operations are delegated to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
557 557
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # filter: callable mapping a relative path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            # join relative components first, then filter the whole path
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

filteropener = filtervfs  # alias
575 575
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through to the wrapped vfs
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
587 587
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # errors on the top-level path are fatal; deeper ones are ignored
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat if not seen before; True when it was new
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, seen) for seen in dirlst):
                return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles, so give up on
        # following symlinks entirely
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)

    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if not recurse:
                dirs[:] = []  # don't descend further
            else:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
        elif followsym:
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
635 635
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        # bundled default.d/*.rc files come first
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
648 648
_rcpath = None  # memoized result of rcpath()

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if not os.path.isdir(p):
            _rcpath.append(p)
        else:
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
    return _rcpath
674 674
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None is mapped to len(repo) so it can take part in comparisons
    return len(repo) if rev is None else rev
681 681
def revsingle(repo, revspec, default='.'):
    '''Resolve a single revision spec, falling back to ``default`` when the
    spec is empty (but not the literal 0).'''
    if not revspec and revspec != 0:
        return repo[default]
    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
690 690
def revpair(repo, revs):
    '''Resolve ``revs`` to a (node, node-or-None) pair.'''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first, second = l.min(), l.max()
    elif l.isdescending():
        first, second = l.max(), l.min()
    else:
        first, second = l.first(), l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single spec without a range separator names one revision, not a pair
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
716 716
# separator for old-style "start:end" revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty range endpoint (but not the literal 0) means the default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                # an endpoint that is itself an alias must go through the
                # revset parser instead
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                if start < end:
                    l = revset.spanset(repo, start, end + 1)
                else:
                    # descending range; spanset's end bound is exclusive
                    l = revset.spanset(repo, start, end - 1)
                subsets.append(l)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                subsets.append(revset.baseset([rev]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))

    return revset._combinesets(subsets)
773 773
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # glob matched nothing: keep the original pattern
            ret.append(kindpat)
    return ret
792 792
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.

    ``pats`` defaults to no patterns and ``opts`` to no options; ``None``
    is used instead of mutable literal defaults so call sites cannot share
    (and accidentally mutate) the same default objects.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'))
    def badfn(f, msg):
        # report unmatched/bad files relative to the cwd
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        # an always-matcher means the patterns were effectively empty
        pats = []
    return m, pats
809 809
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.

    ``None`` (the default) means no patterns/options; mutable default
    arguments are deliberately avoided.
    '''
    # normalize here so this function is safe regardless of how
    # matchandpats treats None
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
813 813
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
817 817
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files)
821 821
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, optionally recording renames.

    Returns 1 when any explicitly matched file was rejected or a subrepo
    addremove failed, 0 otherwise.
    '''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is relevant when it is named exactly or any pattern
        # points inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # still report bad files the user named explicitly, and remember
        # them so we can signal failure below
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    # wrap the matcher instead of monkey-patching m.bad directly
    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
884 883
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
914 913
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed,
    forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # NOTE: branch order below is significant; dstate is the dirstate
    # entry character and st is false-ish when the file is absent on disk
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
943 942
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
958 957
def _markchanges(repo, unknown, deleted, renames):
    '''Record the computed changes in the dirstate: schedule 'unknown' files
    for addition, forget 'deleted' ones, and mark 'renames' as copies.'''
    ctx = repo[None]
    wlock = repo.wlock()
    try:
        # order matters: forgets first, then adds, then copy records
        ctx.forget(deleted)
        ctx.add(unknown)
        for dst, src in renames.iteritems():
            ctx.copy(src, dst)
    finally:
        wlock.release()
971 970
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc:
        # copying back a copy? then just make sure dst is tracked again
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # source is freshly added and uncommitted: no copy data to record
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
990 989
def readrequires(opener, supported):
    '''Read and parse .hg/requires, verifying every entry against the list
    of supported features.

    Returns the set of required features; raises RequirementError if the
    file is corrupt or mentions a feature this Mercurial does not know.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for feature in requirements:
        if feature in supported:
            continue
        # a blank or non-alphanumeric-leading entry means a damaged file
        if not feature or not feature[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(feature)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1009 1008
def writerequires(opener, requirements):
    """Write the requirements, sorted and one per line, to .hg/requires."""
    fp = opener("requires", "w")
    fp.write("".join("%s\n" % r for r in sorted(requirements)))
    fp.close()
1015 1014
1016 1015 class filecachesubentry(object):
1017 1016 def __init__(self, path, stat):
1018 1017 self.path = path
1019 1018 self.cachestat = None
1020 1019 self._cacheable = None
1021 1020
1022 1021 if stat:
1023 1022 self.cachestat = filecachesubentry.stat(self.path)
1024 1023
1025 1024 if self.cachestat:
1026 1025 self._cacheable = self.cachestat.cacheable()
1027 1026 else:
1028 1027 # None means we don't know yet
1029 1028 self._cacheable = None
1030 1029
1031 1030 def refresh(self):
1032 1031 if self.cacheable():
1033 1032 self.cachestat = filecachesubentry.stat(self.path)
1034 1033
1035 1034 def cacheable(self):
1036 1035 if self._cacheable is not None:
1037 1036 return self._cacheable
1038 1037
1039 1038 # we don't know yet, assume it is for now
1040 1039 return True
1041 1040
1042 1041 def changed(self):
1043 1042 # no point in going further if we can't cache it
1044 1043 if not self.cacheable():
1045 1044 return True
1046 1045
1047 1046 newstat = filecachesubentry.stat(self.path)
1048 1047
1049 1048 # we may not know if it's cacheable yet, check again now
1050 1049 if newstat and self._cacheable is None:
1051 1050 self._cacheable = newstat.cacheable()
1052 1051
1053 1052 # check again
1054 1053 if not self._cacheable:
1055 1054 return True
1056 1055
1057 1056 if self.cachestat != newstat:
1058 1057 self.cachestat = newstat
1059 1058 return True
1060 1059 else:
1061 1060 return False
1062 1061
1063 1062 @staticmethod
1064 1063 def stat(path):
1065 1064 try:
1066 1065 return util.cachestat(path)
1067 1066 except OSError, e:
1068 1067 if e.errno != errno.ENOENT:
1069 1068 raise
1070 1069
class filecacheentry(object):
    '''Aggregates stat tracking over a group of paths.'''
    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''True if any tracked path has changed.'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        '''Refresh the recorded stat info for every tracked path.'''
        for e in self._entries:
            e.refresh()
1087 1086
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (under .hg) whose changes invalidate the cache
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator application: remember the wrapped function and its name,
        # then install this descriptor in its place
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # fast path: a value in the instance __dict__ means it was cached
        # (and not invalidated by deleting the attribute) since last stat
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # stat info exists from a prior access: recompute only on change
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # mirror into __dict__ so the next read takes the fast path above
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # explicit assignment replaces the cached object without re-stat'ing
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the __dict__ mirror; the _filecache entry (stat info)
        # survives so the next __get__ can detect whether the file changed
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now