##// END OF EJS Templates
revrange: build spanset from x:y range...
Yuya Nishihara -
r25386:a5a95642 default
parent child Browse files
Show More
@@ -1,1145 +1,1147 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat, inspect
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Immutable 7-tuple of file lists, one list per dirstate status.

    Slots are modified, added, removed, deleted, unknown, ignored and
    clean, in that order. The 'deleted', 'unknown' and 'ignored' slots
    only carry meaning for the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, preferring
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    owner = {}
    for subpath in ctx2.substate:
        owner[subpath] = ctx2
    for subpath in ctx1.substate:
        owner[subpath] = ctx1
    for subpath in sorted(owner):
        yield subpath, owner[subpath].sub(subpath)
85 85
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
106 106
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not usable as a new label (bookmark/branch/tag) name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise util.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # a purely numeric name would be ambiguous with a revision number
        raise util.Abort(_("cannot use an integer as a name"))
120 120
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\n' in f or '\r' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if msg:
        msg = "%s: %r" % (msg, f)
        if abort:
            raise util.Abort(msg)
        ui.warn(_("warning: %s\n") % msg)
137 137
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    asbool = util.parsebool(value)
    # aborting is always on under Windows, where such names cannot exist
    abort = lowered == 'abort' or os.name == 'nt'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (abort or warn or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
150 150
class casecollisionauditor(object):
    '''Warn or abort when a file would case-fold-collide with a tracked one.

    Built once per command from the dirstate; call the instance with each
    candidate filename.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # if True, a detected collision raises instead of warning
        self._abort = abort
        # lower-case all tracked names in one encoding.lower() call by
        # joining them with NUL (NUL cannot appear in a filename)
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        '''Check filename f; warn/abort on a case-folding collision and
        remember f so repeated checks of the same name stay silent.'''
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # already-tracked files are allowed to match themselves
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
174 174
def develwarn(tui, msg):
    """issue a developer warning message

    With tracebacks enabled the full stack is dumped; otherwise only the
    caller's caller (two frames up) is reported, which is the code that
    invoked the deprecated/suspect API.
    """
    msg = 'devel-warn: ' + msg
    if tui.tracebackflag:
        util.debugstacktrace(msg, 2)
    else:
        curframe = inspect.currentframe()
        calframe = inspect.getouterframes(curframe, 2)
        # calframe[2][1:4] is (filename, lineno, function) of the caller's
        # caller
        tui.write_err('%s at: %s:%s (%s)\n' % ((msg,) + calframe[2][1:4]))
184 184
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest, or None when nothing at or below maxrev is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
208 208
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Provides path-relative wrappers around os/util/osutil primitives.
    Subclasses must implement __call__ (open a file relative to the vfs
    root) and join (map a relative path to an absolute one).
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            # only swallow "file not found"; propagate real I/O errors
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind open to __call__ on first use so later calls skip this
        # wrapper entirely
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        '''Return the full binary content of ``path``.'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        '''Return the content of ``path`` as a list of lines.'''
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        '''Write ``data`` to ``path``, truncating any previous content.'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence of lines ``data`` to ``path``.'''
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''Append ``data`` to ``path``.'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under the vfs; return (fd, vfs-relative name).'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    # already writable: the failure was something else
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
408 408
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: absolute root all relative paths are resolved against
        # audit: reject paths escaping the root (see pathutil.pathauditor)
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # mode bits applied to newly created files, or None for umask default
        self.createmode = None
        # lazily determined: whether nlink counts can be trusted on this fs
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # no-op auditor accepting every path
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem under base supports symlinks
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem under base honors the exec bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        '''Apply self.createmode to a newly created file, if configured.'''
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink == -1: not determined; 0: new file, apply createmode;
        # >1 (or untrusted): hardlinked, break the link before writing
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # copy-on-write: replace the hardlinked file with a
                        # private copy before opening it for modification
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''Create a symlink at ``dst`` pointing to ``src``, falling back to
        writing ``src`` as file content where symlinks are unsupported.'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        '''Return the absolute path for vfs-relative ``path`` (or the vfs
        base itself when ``path`` is falsy).'''
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

# historical alias kept for callers that predate the vfs naming
opener = vfs
530 530
class auditvfs(object):
    '''Mixin delegating the ``mustaudit`` flag to a wrapped vfs.'''
    def __init__(self, vfs):
        # the wrapped vfs all operations are forwarded to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
542 542
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable mapping a relative path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # apply the filter before delegating the open to the wrapped vfs
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            # join relative components first so the filter sees one path
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

# historical alias kept for callers that predate the vfs naming
filteropener = filtervfs
560 560
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
571 571
572 572
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err
    # samestat is needed for symlink-cycle detection; without it (e.g. on
    # platforms lacking it) symlink following is disabled
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True if it was new
            # (i.e. not the same inode as any already-seen directory)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link manually, sharing
                        # seen_dirs to avoid revisiting cycles
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
620 620
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        # bundled default configuration snippets ship as *.rc files
        path = [os.path.join(defaultpath, f)
                for f, kind in osutil.listdir(defaultpath)
                if f.endswith('.rc')]
    path += systemrcpath()
    path += userrcpath()
    return [os.path.normpath(f) for f in path]
633 633
# module-level cache for rcpath(); computed once per process
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                # empty entries are skipped, which is how an empty
                # HGRCPATH restricts lookup to .hg/hgrc
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
659 659
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    if rev is not None:
        return rev
    # None denotes the working directory, mapped to len(repo) (one past tip)
    return len(repo)
666 666
def revsingle(repo, revspec, default='.'):
    '''Resolve revspec to a single changectx, or repo[default] when revspec
    is empty (but 0 counts as a real revision). Aborts on an empty set.'''
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    # a multi-rev spec resolves to its last member
    return repo[l.last()]
675 675
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (node, node-or-None) pair.

    The second element is None when the specs name a single revision,
    so callers can distinguish "rev" from "rev:rev".'''
    if not revs:
        # default to the working directory's first parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints cheaply when the set's order is known
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single spec without a range separator means "one revision"
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

# separator used by old-style "x:y" revision ranges
_revrangesep = ':'
703 703
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications.

    Each spec may be an int, an old-style "x:y" range, a single rev name,
    or a revset expression; the resolved subsets are combined in order.
    """
    # NOTE: the original block contained both sides of an unresolved diff
    # (a dead ``rangeiter``/``baseset`` assignment immediately followed by
    # the spanset version); this keeps only the spanset form, which builds
    # the x:y range lazily instead of materializing it.

    def revfix(repo, val, defval):
        # an empty half of "x:y" falls back to the supplied default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                # spanset bounds are half-open, so widen by one in the
                # direction of iteration (ascending or descending)
                if start < end:
                    l = revset.spanset(repo, start, end + 1)
                else:
                    l = revset.spanset(repo, start, end - 1)
                subsets.append(l)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                subsets.append(revset.baseset([rev]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))

    return revset._combinesets(subsets)
756 758
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match: keep the literal pattern
            ret.append(kindpat)
    return ret
775 777
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.

    ``pats``/``opts`` default to an empty list/dict; None sentinels avoid
    the shared-mutable-default pitfall while staying call-compatible.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'))
    def badfn(f, msg):
        # report bad files through the repo ui with a user-relative path
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        # an always-matcher means the patterns imposed no restriction
        pats = []
    return m, pats
792 794
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.

    Mutable defaults replaced by None sentinels; empty list/dict are
    substituted before delegating so behavior is unchanged.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
796 798
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
800 802
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
804 806
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones under matcher; detect renames
    at the given similarity. Returns 1 when any file was rejected, else 0.'''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo participates if named exactly or any pattern file
        # lies inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    # recurse into matching subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        # remember rejected pattern files, but only warn (via the
        # original handler) for files the user named explicitly
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    # report what will be added/removed
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
867 869
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any named file was rejected by the matcher, else 0.'''
    m = matchfiles(repo, files)
    rejected = []
    # collect bad files silently; they are reported via the return code
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
897 899
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # classify each path by its dirstate code ('?' unknown, 'r' removed,
    # 'a' added) combined with whether it exists on disk (st truthy)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
926 928
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity is disabled.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
941 943
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-copy lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
954 956
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # source itself is only added, so there is no committed copy source
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
973 975
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for feature in requirements:
        if feature in supported:
            continue
        # a requirement entry must start with an alphanumeric character
        if not feature or not feature[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(feature)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
992 994
def writerequires(opener, requirements):
    """Write the given requirements, one per line in sorted order, to the
    "requires" file obtained from opener.

    The file is closed even if a write fails (the original leaked the
    handle on error).
    """
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        reqfile.close()
998 1000
class filecachesubentry(object):
    """Tracks stat information for one file so changes can be detected.

    cachestat holds the last recorded stat data (or None).  _cacheable is
    tri-state: True/False once determined from the stat data, None while
    still unknown (e.g. the file did not exist when last stat'ed).
    """
    def __init__(self, path, stat):
        # path: filesystem path to watch
        # stat: if true, record the file's stat info immediately
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-record stat info; pointless if the file is known uncacheable
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed on disk (or cannot be trusted)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        # remember the new stat data so the next call compares against it
        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # Return util.cachestat for path, or None (implicitly) when the
        # file is missing; any OSError other than ENOENT propagates.
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
1053 1055
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per watched path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits just like the original early-return loop
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1070 1072
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        # paths: file names (relative to .hg) whose changes invalidate
        # the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name,
        # then return self so the class attribute becomes this descriptor
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # a tracked file changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        # cache on the instance so the fast path above is hit next time
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the per-instance cached value; the _filecache entry
        # (stat info) is kept
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now