##// END OF EJS Templates
devel-warn: add a prefix to all messages ("devel-warn: ")...
Pierre-Yves David -
r24755:cd89f4e6 default
parent child Browse files
Show More
@@ -1,1159 +1,1160 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat, inspect
14 14
# select the platform-specific scm helper module; os.name == 'nt' covers
# all Windows flavours, everything else is treated as POSIX
if os.name == 'nt':
    import scmwindows as scmplatform
else:
    import scmposix as scmplatform

# re-export the platform-appropriate hgrc-path helpers at module level
systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # store the seven file lists positionally; the properties below
        # give them readable names
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, giving
    # ctx1 priority: subpaths from ctx2 matter when the .hgsub file has
    # been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = {}
    for subpath in ctx2.substate:
        subpaths[subpath] = ctx2
    for subpath in ctx1.substate:
        subpaths[subpath] = ctx1
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)
85 85
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    # mention how many live secret changesets were held back, if any
    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
106 106
def checknewlabel(repo, lbl, kind):
    '''Reject label names that are reserved, contain forbidden
    characters, or parse as integers.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the name parsed as an integer, which would shadow a revision
        raise util.Abort(_("cannot use an integer as a name"))
120 120
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\n' in f or '\r' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    # only pay for the windows-name check when someone will act on it
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
137 137
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts: non-portable names cannot be created there
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
150 150
class casecollisionauditor(object):
    '''Warn about (or abort on) filenames that collide case-insensitively
    with files already tracked in the dirstate.'''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # when True, a collision raises util.Abort instead of warning
        self._abort = abort
        # lower-case all tracked names in a single encoding.lower() call
        # by joining them with NUL (cheaper than lowering one by one)
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # already checked during this audit session
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # only a collision if f itself is not the tracked file
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        # remember the name so later calls with colliding names are caught
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
174 174
def develwarn(tui, msg):
    """issue a developer warning message"""
    # tag the message so devel warnings are easy to grep for in output
    msg = 'devel-warn: ' + msg
    if tui.tracebackflag:
        util.debugstacktrace(msg, 2)
    else:
        # report the caller's caller (two frames up) as the warning site
        frame = inspect.currentframe()
        outer = inspect.getouterframes(frame, 2)
        tui.write_err('%s at: %s:%s (%s)\n' % ((msg,) + outer[2][1:4]))
183 184
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
207 208
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            # only swallow "file not found"; propagate real I/O errors
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            # only swallow "file not found"; propagate real I/O errors
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # cache the bound __call__ on the instance so subsequent open()
        # calls skip this method entirely
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        '''Return the entire contents of ``path``, opened in binary mode.'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        '''Return the contents of ``path`` as a list of lines.'''
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        '''Overwrite ``path`` with ``data`` (binary mode).'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence ``data`` to ``path``.'''
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''Append ``data`` to ``path`` (binary mode).'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The helpers below mirror os/os.path/util/osutil functions applied
    # to paths made absolute via self.join().

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under ``dir`` (vfs-relative) and return
        (fd, name) with ``name`` kept relative when ``dir`` was given.'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
407 408
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory against which all relative paths are resolved
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # mode forced onto newly created files; None means leave as-is
        self.createmode = None
        # lazily learned: whether nlink counts can be trusted on this fs
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a newly created file, when configured and
        # the filesystem honors permission bits
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            # write access: may need to break hardlinks (COW) first
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        # truncating anyway: unlink to break any hardlink
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    # file does not exist yet; treat as zero links
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # copy-on-write: replace the hardlinked file with
                        # a private copy before opening it for writing
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # file was created by this call; fix its permission bits
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            # replace any pre-existing link/file at the destination
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # filesystem has no symlink support: store the target as
            # the file's contents instead
            self.write(dst, src)

    def join(self, path, *insidef):
        # empty/None path means the vfs root itself
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
527 528
528 529 opener = vfs
529 530
class auditvfs(object):
    '''Mixin for vfs wrappers: forwards the ``mustaudit`` flag to the
    wrapped vfs so toggling path auditing reaches the real implementation.'''
    def __init__(self, vfs):
        # the wrapped vfs all operations ultimately delegate to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
541 542
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before it reaches the real vfs
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            # empty path: pass through untouched (vfs root)
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
557 558
558 559 filteropener = filtervfs
559 560
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # permit plain read access only; everything else is refused
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
570 571
571 572
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only a failure on the starting path itself is fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # remember the stat of every visited directory so symlink
        # cycles are detected and walked only once
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            # True means "not seen before, ok to descend"
            return not match
    else:
        # without samestat we cannot detect symlink cycles, so refuse
        # to follow symlinks at all
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target in a nested generator so
                        # seen_dirs is shared across the recursion
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
619 620
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # bundled default.d/*.rc files come first
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    # then system-wide, then per-user configuration
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
632 633
633 634 _rcpath = None
634 635
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # return the memoized value on every call after the first
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            # directories contribute every *.rc file they contain
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
658 659
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None stands for the working directory; map it to the virtual
    # revision number len(repo)
    return len(repo) if rev is None else rev
665 666
def revsingle(repo, revspec, default='.'):
    '''Resolve a single revision spec, falling back to ``default`` when
    the spec is empty (but 0 is a valid revision).'''
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs.last()]
674 675
def revpair(repo, revs):
    '''Resolve ``revs`` into a (first, second) node pair; ``second`` is
    None when only a single revision was specified.'''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick endpoints cheaply when the set's ordering is known
    first = second = None
    if l:
        if l.isascending():
            first, second = l.min(), l.max()
        elif l.isdescending():
            first, second = l.max(), l.min()
        else:
            first, second = l.first(), l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single non-range spec resolving to one rev yields (rev, None)
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
700 701
701 702 _revrangesep = ':'
702 703
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty (but not 0) spec falls back to the supplied default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # l accumulates the result; seen de-duplicates across specs and is
    # only materialized from l lazily (see the top of the loop)
    seen, l = set(), revset.baseset([])

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]
    # NOTE(review): in Python 2 the list comprehension above leaks and
    # rebinds local '_', shadowing the i18n helper for the rest of this
    # function -- harmless today only because '_' is not used below

    for spec in revs:
        # sync seen with l deferred from a previous fast-path iteration
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                # keep the user's requested direction (start > end means
                # a descending range)
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            # single revset query: return its smartset directly
            l = m(repo)

    return l
776 777
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kinds are never glob-expanded
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # the glob matched nothing: keep the original pattern
            ret.append(kindpat)
    return ret
795 796
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    # None sentinels replace the previous mutable default arguments
    # (pats=[], opts={}); callers passing [] or {} see identical behavior
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    # report rejected files through the ui rather than raising
    def badfn(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        # a match-everything matcher implies no effective patterns
        pats = []
    return m, pats
812 813
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    # None sentinels replace the previous mutable default arguments
    # (pats=[], opts={}); normalize before delegating so matchandpats
    # receives the same concrete values as before
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
816 817
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
820 821
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files)
824 825
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add new files, forget missing ones, and record renames, honoring
    subrepos; returns 1 on any failure, otherwise 0.'''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is involved if it is named exactly or any pattern
        # points inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    # recurse into matching subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # temporarily swap in a bad-file callback that records rejects and
    # only reports files the user named explicitly
    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    # tell the user which files will be added/removed (verbose, or when
    # the file was found by a pattern rather than named exactly)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly named file that was rejected is a hard failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
887 888
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    # collect rejected files silently; they are checked at the end
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # in verbose mode, report each file being added or removed
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # failure (1) only when an explicitly requested file was rejected
    for f in rejected:
        if f in m.files():
            return 1
    return 0
917 918
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # classify each walked file by its dirstate code ('?' untracked,
    # 'r' removed, 'a' added) combined with whether it exists on disk
    # (st is the stat result, None/False when missing)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but gone from the working copy
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
946 947
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    # similarity of 0 (or less) disables rename detection entirely
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
961 962
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wlock = repo.wlock()
    try:
        # operate on the working context under the working-dir lock
        wctx = repo[None]
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst in renames:
            wctx.copy(renames[dst], dst)
    finally:
        wlock.release()
974 975
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    # copy of an uncommitted add: warn that no copy data can be recorded
    if ds[origsrc] == 'a' and origsrc == src and not ui.quiet:
        ui.warn(_("%s has not been committed yet, so no copy "
                  "data will be stored for %s.\n")
                % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
    if dryrun:
        return
    if ds[dst] in '?r':
        # destination is untracked or marked removed: just add it
        wctx.add([dst])
    else:
        wctx.copy(origsrc, dst)
993 994
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings read from the file. Raises
    error.RequirementError if the file looks corrupt or lists a feature
    this Mercurial does not support.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = sorted(r for r in requirements if r not in supported)
    for r in missings:
        # an empty or non-alphanumeric-leading entry means the file is bogus
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1012 1013
class filecachesubentry(object):
    """Tracks on-disk state for one file backing a filecache entry.

    Wraps util.cachestat for a single path and answers, via changed(),
    whether the file appears to have been replaced since the last
    recorded stat.
    """
    def __init__(self, path, stat):
        # path: file to watch; stat: if true, record stat info immediately
        self.path = path
        self.cachestat = None
        # tri-state: True/False once determined, None while unknown
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-record stat info for the path (no-op when not cacheable)."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Report whether the filesystem can tell us if the file changed."""
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """True if the file changed (or we cannot tell) since the last stat."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns util.cachestat for path, or None if the file is missing
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
1067 1068
class filecacheentry(object):
    """A group of filecachesubentry objects watched as one unit."""

    def __init__(self, paths, stat=True):
        # one sub-entry per watched path; stat is forwarded unchanged
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        """Re-record stat info for every watched path."""
        for e in self._entries:
            e.refresh()
1084 1085
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial uses either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        # paths: file names (joined via self.join at access time) whose
        # changes invalidate the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped computation and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # descriptor protocol: return the cached value, recomputing it when
        # any tracked file changed.  Invariant: name in obj.__dict__ implies
        # name in obj._filecache.
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # a tracked file was replaced on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # explicit assignment overrides the computed/cached value
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop the cached value so the next access recomputes via __get__
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
@@ -1,90 +1,90 b''
1 1
2 2 $ cat << EOF > buggylocking.py
3 3 > """A small extension that acquire locks in the wrong order
4 4 > """
5 5 >
6 6 > from mercurial import cmdutil
7 7 >
8 8 > cmdtable = {}
9 9 > command = cmdutil.command(cmdtable)
10 10 >
11 11 > @command('buggylocking', [], '')
12 12 > def buggylocking(ui, repo):
13 13 > tr = repo.transaction('buggy')
14 14 > lo = repo.lock()
15 15 > wl = repo.wlock()
16 16 > wl.release()
17 17 > lo.release()
18 18 >
19 19 > @command('properlocking', [], '')
20 20 > def properlocking(ui, repo):
21 21 > """check that reentrance is fine"""
22 22 > wl = repo.wlock()
23 23 > lo = repo.lock()
24 24 > tr = repo.transaction('proper')
25 25 > tr2 = repo.transaction('proper')
26 26 > lo2 = repo.lock()
27 27 > wl2 = repo.wlock()
28 28 > wl2.release()
29 29 > lo2.release()
30 30 > tr2.close()
31 31 > tr.close()
32 32 > lo.release()
33 33 > wl.release()
34 34 >
35 35 > @command('nowaitlocking', [], '')
36 36 > def nowaitlocking(ui, repo):
37 37 > lo = repo.lock()
38 38 > wl = repo.wlock(wait=False)
39 39 > wl.release()
40 40 > lo.release()
41 41 > EOF
42 42
43 43 $ cat << EOF >> $HGRCPATH
44 44 > [extensions]
45 45 > buggylocking=$TESTTMP/buggylocking.py
46 46 > [devel]
47 47 > all=1
48 48 > EOF
49 49
50 50 $ hg init lock-checker
51 51 $ cd lock-checker
52 52 $ hg buggylocking
53 transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking)
54 "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking)
53 devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking)
54 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking)
55 55 $ cat << EOF >> $HGRCPATH
56 56 > [devel]
57 57 > all=0
58 58 > check-locks=1
59 59 > EOF
60 60 $ hg buggylocking
61 transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking)
62 "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking)
61 devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking)
62 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking)
63 63 $ hg buggylocking --traceback
64 transaction with no lock at:
64 devel-warn: transaction with no lock at:
65 65 */hg:* in * (glob)
66 66 */mercurial/dispatch.py:* in run (glob)
67 67 */mercurial/dispatch.py:* in dispatch (glob)
68 68 */mercurial/dispatch.py:* in _runcatch (glob)
69 69 */mercurial/dispatch.py:* in _dispatch (glob)
70 70 */mercurial/dispatch.py:* in runcommand (glob)
71 71 */mercurial/dispatch.py:* in _runcommand (glob)
72 72 */mercurial/dispatch.py:* in checkargs (glob)
73 73 */mercurial/dispatch.py:* in <lambda> (glob)
74 74 */mercurial/util.py:* in check (glob)
75 75 $TESTTMP/buggylocking.py:* in buggylocking (glob)
76 "wlock" acquired after "lock" at:
76 devel-warn: "wlock" acquired after "lock" at:
77 77 */hg:* in * (glob)
78 78 */mercurial/dispatch.py:* in run (glob)
79 79 */mercurial/dispatch.py:* in dispatch (glob)
80 80 */mercurial/dispatch.py:* in _runcatch (glob)
81 81 */mercurial/dispatch.py:* in _dispatch (glob)
82 82 */mercurial/dispatch.py:* in runcommand (glob)
83 83 */mercurial/dispatch.py:* in _runcommand (glob)
84 84 */mercurial/dispatch.py:* in checkargs (glob)
85 85 */mercurial/dispatch.py:* in <lambda> (glob)
86 86 */mercurial/util.py:* in check (glob)
87 87 $TESTTMP/buggylocking.py:* in buggylocking (glob)
88 88 $ hg properlocking
89 89 $ hg nowaitlocking
90 90 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now