##// END OF EJS Templates
scmutil: use the optional badfn argument when building a matcher
Matt Harbison -
r25466:007a1d53 default
parent child Browse files
Show More
@@ -1,1161 +1,1162
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat, inspect
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # field order is part of the tuple contract; keep it stable
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    def _mkprop(index, doc):
        # build a read-only accessor for the tuple slot at ``index``
        def fget(self):
            return self[index]
        fget.__doc__ = doc
        return property(fget)

    modified = _mkprop(0, 'files that have been modified')
    added = _mkprop(1, 'files that have been added')
    removed = _mkprop(2, 'files that have been removed')
    deleted = _mkprop(3, 'files that are in the dirstate, but have been '
                         'deleted from the working copy (aka "missing")')
    unknown = _mkprop(4, 'files not in the dirstate that are not ignored')
    ignored = _mkprop(5, 'files not in the dirstate that are ignored '
                         '(by _dirignore())')
    clean = _mkprop(6, 'files that have not been modified')

    del _mkprop

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, giving ctx1
    # priority. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = {}
    for subpath in ctx2.substate:
        subpaths[subpath] = ctx2
    for subpath in ctx1.substate:
        subpaths[subpath] = ctx1

    missing = set()
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            missing.add(subpath)
            del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That
    # way, status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
121 121
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not usable as a new label name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise util.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for c in ('\r', '\n'):
        if c in f:
            raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                             % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    parsed = util.parsebool(val)
    # Windows always aborts: non-portable names cannot exist there at all
    abort = os.name == 'nt' or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    valid = warn or abort or lowered == 'ignore'
    if parsed is None and not valid:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    '''Warn or abort when a new filename would collide case-insensitively
    with an already tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case every tracked name once up front; joining on '\0'
        # lets the whole dirstate be folded in a single call
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
189 189
def develwarn(tui, msg):
    """issue a developer warning message"""
    prefixed = 'devel-warn: ' + msg
    if tui.tracebackflag:
        util.debugstacktrace(prefixed, 2)
        return
    # report the location two frames up: the caller of develwarn's caller
    frames = inspect.getouterframes(inspect.currentframe(), 2)
    where = frames[2][1:4]
    tui.write_err('%s at: %s:%s (%s)\n' % ((prefixed,) + where))
199 199
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    filtered = repo.changelog.filteredrevs
    if not filtered:
        return None
    revs = sorted(r for r in filtered if r <= maxrev)
    if not revs:
        return None
    sha = util.sha1()
    for rev in revs:
        sha.update('%s;' % rev)
    return sha.digest()
223 223
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    # Concrete subclasses provide __call__(path, mode, ...) returning an
    # open file object and join(path) mapping a vfs-relative path to an
    # absolute one; everything below is implemented in terms of those two.

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # after the first use, 'open' becomes a plain per-instance alias
        # of __call__, skipping this shim on subsequent calls
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        '''return the whole binary content of ``path``'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        '''return the content of ``path`` as a list of lines'''
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        '''write ``data`` to ``path``, truncating any previous content'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''write the sequence of lines ``data`` to ``path``'''
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''append ``data`` to the current content of ``path``'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The methods below are thin wrappers around os/util/osutil
    # equivalents, resolved against the vfs root via self.join().

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''create a temporary file; the returned name stays vfs-relative'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove itself
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
423 423
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        # tri-state: None = unknown, decided lazily on first write access
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem under self.base supports symlinks
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem under self.base honors the exec bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the link count of the target file: -1 = not a write
        # access, 0 = file is (or will be) newly created
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks (copy-then-rename) before the
                        # in-place write so other links keep the old content
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            # replace any pre-existing link target
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target as file content
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
543 543
opener = vfs  # 'opener' is an alias of the vfs class
545 545
class auditvfs(object):
    '''Mixin that wraps another vfs and forwards the ``mustaudit``
    property to it.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
557 557
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that passes every filename through a filter function
    before delegating to the wrapped vfs.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        # join the relative pieces first, then filter the combined path
        combined = self.vfs.reljoin(path, *insidef)
        return self.vfs.join(self._filter(combined))
573 573
filteropener = filtervfs  # 'filteropener' is an alias of the filtervfs class
575 575
class readonlyvfs(abstractvfs, auditvfs):
    '''Vfs wrapper that rejects every write access to the wrapped vfs.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only the plain read modes may reach the wrapped vfs
        if mode != 'r' and mode != 'rb':
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
586 586
587 587
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the starting path itself are fatal
        if err.filename == path:
            raise err
    # samestat is not available on all platforms; without it symlink
    # cycle detection is impossible, so followsym is forced off below
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # return True (and record the stat) only for directories not
            # already seen, comparing by device/inode via samestat
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk symlinked dirs via a recursive call so the
                        # shared seen_dirs list prevents cycles
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
635 635
def osrcpath():
    '''return default os-specific hgrc search path'''
    rcs = []
    defaultdir = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultdir):
        # keep osutil.listdir's ordering; only *.rc entries are used
        for name, kind in osutil.listdir(defaultdir):
            if name.endswith('.rc'):
                rcs.append(os.path.join(defaultdir, name))
    rcs.extend(systemrcpath())
    rcs.extend(userrcpath())
    return [os.path.normpath(p) for p in rcs]
648 648
_rcpath = None  # cached result of rcpath(); computed lazily on first call
650 650
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            # directories contribute every *.rc file they contain
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
674 674
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working-directory revision (None) sorts after the tip
    return len(repo) if rev is None else rev
681 681
def revsingle(repo, revspec, default='.'):
    '''Return the single changectx selected by ``revspec`` (falling back
    to ``default`` when the spec is empty).'''
    # an empty spec (but not the literal revision 0) selects the default
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs.last()]
690 690
def revpair(repo, revs):
    '''return a pair of nodes for a list of revision specs

    The second element is None when no revs are given (working dir
    parent is returned) or when a single non-range spec resolves to a
    single revision.
    '''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints without forcing the smartset to materialize:
    # min/max for ordered sets, first/last otherwise
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single spec without the ':' separator yielding one revision is
    # reported as (node, None) so callers can tell it from a real range
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
716 716
_revrangesep = ':'  # separator of "start:end" in old-style revision ranges
718 718
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # map an empty spec half (e.g. in ":x" or "x:") to the default rev
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                # spanset end bound is exclusive; +1/-1 keeps 'end'
                # included for both ascending and descending ranges
                if start < end:
                    l = revset.spanset(repo, start, end + 1)
                else:
                    l = revset.spanset(repo, start, end - 1)
                subsets.append(l)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                subsets.append(revset.baseset([rev]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))

    return revset._combinesets(subsets)
773 773
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume the shell has already done the expansion.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # a glob matching nothing is kept verbatim
            expanded.append(kindpat)
    return expanded
792 792
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def badfn(f, msg):
        # report files the matcher flags as bad, relative to the cwd
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    # pass the warning callback straight to the matcher via the optional
    # badfn argument instead of monkeypatching m.bad after the fact
    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
809 810
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    m, pats = matchandpats(ctx, pats, opts, globbed, default)
    return m
813 814
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    cwd = repo.getcwd()
    return matchmod.always(repo.root, cwd)
817 818
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    cwd = repo.getcwd()
    return matchmod.exact(repo.root, cwd, files)
821 822
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''schedule adding unknown files and removing missing ones, recursing
    into subrepos; returns 1 if any matched file was rejected or a subrepo
    addremove reported a problem, otherwise 0'''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is involved if it is matched exactly or any matched
        # file lives inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only files explicitly named by the matcher are reported/rejected
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
883 884
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    # collect names the walk flags as bad instead of warning immediately
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # returns 1 if any explicitly-listed file was rejected, else 0
    for f in rejected:
        if f in m.files():
            return 1
    return 0
913 914
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # classify each walked file by its dirstate code ('?' untracked,
    # 'r' removed, 'a' added) and whether it exists on disk (st truthy)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
942 943
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    ui = repo.ui
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        # stay quiet about pairs the user named explicitly, unless verbose
        exactpair = matcher.exact(old) and matcher.exact(new)
        if ui.verbose or not exactpair:
            ui.status(_('recording removal of %s as rename to %s '
                        '(%d%% similar)\n') %
                      (matcher.rel(old), matcher.rel(new), score * 100))
        renames[new] = old
    return renames
957 958
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    ctx = repo[None]
    lock = repo.wlock()
    try:
        # drop the missing files first, then schedule the new ones
        ctx.forget(deleted)
        ctx.add(unknown)
        for dst, src in renames.iteritems():
            ctx.copy(src, dst)
    finally:
        lock.release()
970 971
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # source is only marked added, so there is no committed copy source
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
989 990
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for entry in requirements:
        if entry in supported:
            continue
        # an empty or non-alphanumeric-leading entry means the file is bogus
        if not entry or not entry[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(entry)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1008 1009
def writerequires(opener, requirements):
    '''Write the given requirements to the "requires" file via opener.

    One requirement per line, sorted so the file layout is stable across
    runs with the same requirement set.'''
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        # make sure the handle is closed even if a write fails
        reqfile.close()
1014 1015
class filecachesubentry(object):
    '''Stat-tracking helper for a single path, used by filecache entries.'''

    def __init__(self, path, stat):
        self.path = path
        # None until a stat result is recorded
        self.cachestat = None
        # tri-state: True/False once known, None while undetermined
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # re-stat only pays off when the stat data is usable as a cache key
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # optimistically assume cacheability while still undetermined
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        # if the path can't be cached, always report a change
        if not self.cacheable():
            return True

        current = filecachesubentry.stat(self.path)

        # a fresh stat may settle a previously undetermined cacheability
        if current and self._cacheable is None:
            self._cacheable = current.cacheable()

        # check again now that cacheability may be known
        if not self._cacheable:
            return True

        if self.cachestat == current:
            return False
        self.cachestat = current
        return True

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            # a missing file simply yields no stat data (returns None)
            if e.errno != errno.ENOENT:
                raise
1069 1070
class filecacheentry(object):
    '''Aggregates per-path stat tracking over a set of paths.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1086 1087
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # a value already present in __dict__ is known to be current
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)
        if not entry:
            fullpaths = [self.join(obj, p) for p in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(fullpaths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry
        else:
            if entry.changed():
                entry.obj = self.func(obj)

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            fullpaths = [self.join(obj, p) for p in self.paths]
            entry = filecacheentry(fullpaths, False)
            obj._filecache[self.name] = entry
        else:
            entry = obj._filecache[self.name]

        entry.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now