# Excerpt of mercurial/scmutil.py as of changeset r25904:fbaa2de1 (default)
# "revrange: drop old-style parser in favor of revset (API)" -- Yuya Nishihara
# Diff hunk: @@ -1,1168 +1,1124
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 from mercurial.node import nullrev, wdirrev
9 from mercurial.node import wdirrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    # No per-instance dict: instances are plain 7-tuples.
    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Field order is fixed; the read-only properties below index into it.
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    modified = property(lambda self: self[0],
                        doc='''files that have been modified''')

    added = property(lambda self: self[1],
                     doc='''files that have been added''')

    removed = property(lambda self: self[2],
                       doc='''files that have been removed''')

    deleted = property(lambda self: self[3],
                       doc='''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        ''')

    unknown = property(lambda self: self[4],
                       doc='''files not in the dirstate that are not ignored''')

    ignored = property(lambda self: self[5],
                       doc='''files not in the dirstate that are ignored (by _dirignore())''')

    clean = property(lambda self: self[6],
                     doc='''files that have not been modified''')

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs, sorted by subpath, preferring the
    subrepo state from ctx1 when a subpath exists in both contexts.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # Subpaths that appear only in ctx2, not in ctx1.
    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    # Mention how many secret changesets were held back, if any.
    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
121 121
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not acceptable as a new bookmark/branch/tag name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # A purely numeric name would be ambiguous with a revision number.
        raise util.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    # Skip the (possibly expensive) portability check when nobody cares.
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts; elsewhere only an explicit 'abort' does.
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    # bval is None when the value is neither a boolean nor a known keyword.
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    # Warn (or abort) when a file being added would collide, on a
    # case-insensitive filesystem, with a file already tracked in dirstate.
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Lower-case every tracked filename in one pass; '\0' cannot appear
        # in filenames, so it is a safe join/split separator.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        '''Check filename ``f``; warn or raise util.Abort on a collision.'''
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # Collision only if the folded name is taken by a *different* file.
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        # Record the new name so later adds collide against it too.
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189 189
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
213 213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # Only a missing file is tolerated; other IO errors propagate.
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # Rebind 'open' to __call__ on first use so subsequent calls skip
        # this method entirely (micro-optimization via self-replacement).
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # Read whole file as bytes; close even if read() raises.
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The following methods are thin wrappers over os/util helpers applied
    # to self.join(path); they exist so callers never build absolute paths
    # themselves and so subclasses can handle unusual encodings.

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        # Create the temp file under the vfs, but return a vfs-relative name.
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
425 425
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: root directory all relative paths are resolved against.
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # createmode: mode bits applied to newly created files (or None).
        self.createmode = None
        # _trustnlink: lazily-detected flag for whether st_nlink is reliable.
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        # Toggling the flag swaps the auditor between a real path auditor
        # and an always-true stub.
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # True if the filesystem under base supports symlinks.
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # True if the filesystem under base honors the exec bit.
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the hard-link count of the target so we can break
        # hard links before writing (copy-on-write semantics). -1 = unknown,
        # 0 = new file (triggers _fixfilemode below).
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        # Truncating write: remove first so hard links and
                        # mode quirks of the old file cannot leak through.
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # Break the hard link by replacing the file with a
                        # private copy before opening it for append/update.
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        # Create a symlink at dst pointing to src; falls back to writing
        # src as the file's content where symlinks are unsupported.
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        # Resolve a vfs-relative path to an absolute one; None/'' -> base.
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
545 545
# Historical alias: older code refers to the vfs class as 'opener'.
opener = vfs
547 547
class auditvfs(object):
    '''Mixin proxying the ``mustaudit`` flag to a wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff
559 559
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # Callable mapping a relative path to the path actually used.
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        filtered = self._filter(self.vfs.reljoin(path, *insidef))
        return self.vfs.join(filtered)

# Historical alias for callers predating the vfs naming.
filteropener = filtervfs
577 577
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # Only plain read modes pass through; anything else is refused.
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
588 588
589 589
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only errors on the starting path itself are fatal.
        if err.filename == path:
            raise err
    # Symlink following needs samestat() to detect directory cycles;
    # without it, followsym is silently disabled.
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst; return False if an equivalent
            # directory (same inode) was already seen (i.e. a cycle).
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                # mutating dirs in place tells os.walk not to descend
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the symlink with shared cycle state
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
637 637
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # Bundled defaults (default.d/*.rc) come first, then system, then user.
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
650 650
# Cached result of rcpath(); computed once per process.
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
676 676
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation

    The working directory (rev is None) is mapped to the wdirrev sentinel.
    """
    return wdirrev if rev is None else rev
683 683
def revsingle(repo, revspec, default='.'):
    '''Resolve a single revision spec to a changectx (last of the set).'''
    # An empty spec (but not the integer 0) falls back to the default rev.
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
692 692
def revpair(repo, revs):
    '''Resolve revs to a pair of nodes; the second is None for a single rev.'''
    if not revs:
        return repo.dirstate.p1(), None

    matched = revrange(repo, revs)

    # Pick endpoints cheaply when the set's ordering is known.
    if not matched:
        first = second = None
    elif matched.isascending():
        first, second = matched.min(), matched.max()
    elif matched.isdescending():
        first, second = matched.max(), matched.min()
    else:
        first, second = matched.first(), matched.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # A lone spec without a range separator denotes a single revision.
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
718 718
# Range separator recognized in revision specs (still used by revpair).
_revrangesep = ':'

def revrange(repo, revs):
    """Execute 1 to many revsets and return the union.

    ``revs`` is an iterable of revision specifications: revset strings or
    bare integer revision numbers. Each spec is parsed and evaluated by the
    revset machinery (so revset aliases and hash prefixes are honored), and
    the results are combined into a single smartset which is returned.

    Note: this block previously interleaved a removed old-style range
    parser with the revset path (diff residue); the old half referenced
    ``revfix``/``revsetaliases`` and ``nullrev``, names no longer defined
    here, so only the revset-based implementation is kept.
    """
    subsets = []
    for spec in revs:
        # A bare integer cannot be handed to the revset parser; wrap it
        # in an explicit rev() expression first.
        if isinstance(spec, int):
            spec = revset.formatspec('rev(%d)', spec)
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))
    return revset._combinesets(subsets)
775 731
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        # Explicitly-kinded patterns are passed through untouched.
        if kind is not None:
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # Keep the original pattern when the glob matched nothing.
            ret.append(kindpat)
    return ret
794 750
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    # Avoid mutable default arguments; () and None behave like the old
    # [] / {} defaults for all existing callers.
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # 'm' is bound below; late binding is intentional.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # A match-everything matcher reports no patterns.
    if m.always():
        pats = []
    return m, pats
817 773
def match(ctx, pats=(), opts=None, globbed=False, default='relpath', badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # Avoid mutable default arguments; normalize here so the delegate
    # always receives a real dict.
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
821 777
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
825 781
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
829 785
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add new files and remove missing files matched by ``matcher``.

    Recurses into subrepos when requested; detects renames using
    ``similarity``. Returns 1 if any matched file was rejected, else 0
    (or 1 if a subrepo addremove reported a problem).
    '''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # True if the matcher names the subrepo itself or any file under it.
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # Collect patterns that matched nothing so we can report failure below.
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
891 847
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: the lambda captures 'rejected' before it is assigned; Python's
    # late binding makes this work because badfn only runs later.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    # Unlike addremove(), this always applies the changes (no dry-run).
    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
920 876
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # st is the stat result (or None/False when the file is gone);
    # dstate is the dirstate status char: '?' unknown, 'r' removed, 'a' added.
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
949 905
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    # similarity <= 0 disables rename detection entirely.
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
964 920
def _markchanges(repo, unknown, deleted, renames):
    '''Record pending changes under the working-copy lock: files in unknown
    are marked added, files in deleted are forgotten, and entries of
    renames (a {dest: source} map) are recorded as copies.'''
    ctx = repo[None]
    lock = repo.wlock()
    try:
        ctx.forget(deleted)
        ctx.add(unknown)
        for dest, source in renames.iteritems():
            ctx.copy(source, dest)
    finally:
        lock.release()
977 933
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # the source is only added so far (the warn message below states it
        # hasn't been committed); no copy data can be recorded, just make
        # sure dst ends up tracked
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
996 952
def readrequires(opener, supported):
    '''Read and parse .hg/requires through opener and return the set of
    entries found.

    Raises error.RequirementError if the file looks corrupt or if any
    entry is not in the supported collection.'''
    requirements = set(opener.read("requires").splitlines())
    unsupported = []
    for feature in requirements:
        if feature in supported:
            continue
        # an empty line or one not starting with an alphanumeric character
        # is taken as file corruption rather than an unknown feature
        if not feature or not feature[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unsupported.append(feature)
    if unsupported:
        unsupported.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(unsupported),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1015 971
def writerequires(opener, requirements):
    '''Write requirements, sorted and one per line, to the "requires" file
    opened through opener.'''
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        # close even if a write fails, so the handle is never leaked
        reqfile.close()
1021 977
class filecachesubentry(object):
    '''Stat-based change tracker for a single file path.

    Wraps util.cachestat for one path and remembers whether that stat data
    can be trusted for cache validation ("cacheable"). _cacheable is a
    tri-state: True/False once known, None while still undetermined.'''

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means we don't know yet whether stat data is reliable here
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # re-record stat data, but only when stat data is usable at all
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is None:
            # we don't know yet, assume it is for now
            return True
        return self._cacheable

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # returns None (implicitly) when the file does not exist; any
        # other OS error is propagated
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1076 1032
class filecacheentry(object):
    '''Aggregate change tracker over several file paths, one
    filecachesubentry per path.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1093 1049
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    On first access the decorated function runs and its result is cached on
    the instance, together with stat data for the tracked files (recorded
    in obj._filecache). Subsequent accesses compare fresh stat data against
    the recorded one and recompute the value only when a tracked file
    changed.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # fast path: value already materialized on the instance
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # recompute only if some tracked file changed on disk
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            tracked = [self.join(obj, p) for p in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(tracked, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        try:
            entry = obj._filecache[self.name]
        except KeyError:
            # register an entry for the missing name, because X in __dict__
            # implies X in _filecache
            tracked = [self.join(obj, p) for p in self.paths]
            entry = filecacheentry(tracked, False)
            obj._filecache[self.name] = entry

        entry.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        if self.name not in obj.__dict__:
            raise AttributeError(self.name)
        del obj.__dict__[self.name]
General Comments 0
You need to be logged in to leave comments. Login now