##// END OF EJS Templates
vfs: make it possible to pass multiple path elements to join...
Matt Harbison -
r24628:a0b47885 default
parent child Browse files
Show More
@@ -1,1130 +1,1130 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # pack the seven lists in a fixed order; the properties below
        # give them readable names
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo-object) pairs in sorted subpath order.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)
85 85
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.

    Excluded secret (non-extinct) changesets are counted and mentioned
    in the status message.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleanup.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
106 106
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not acceptable as a new label name.

    Rejects reserved names, names containing control/separator
    characters, and purely numeric names.
    '''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    # integers would be ambiguous with revision numbers
    try:
        int(lbl)
        raise util.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass
120 120
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # raises util.Abort when the name embeds a newline or carriage return
    if '\r' in f or '\n' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        # msg is falsy when the name is acceptable on Windows
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise util.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)
137 137
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans.  Raises ConfigError when
    ui.portablefilenames has an unrecognized value.
    '''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # on Windows itself, non-portable names must always abort
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
150 150
class casecollisionauditor(object):
    '''Warn or abort when a filename would case-fold-collide with a file
    already known to the dirstate.  Call the instance with each candidate
    filename.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # join with NUL so the whole dirstate can be lowercased with a
        # single encoding.lower() call rather than one call per file
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a name already tracked with this exact case is not a collision
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
174 174
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Concrete subclasses must provide __call__() (open a file relative to
    the vfs root) and join() (map a relative path to a full path); every
    helper below is expressed in terms of those two.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind 'open' to the bound __call__ on this instance so later
        # opens skip this forwarding method
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        '''return the binary contents of ``path``'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        '''return the contents of ``path`` as a list of lines'''
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        '''overwrite ``path`` with ``data`` (binary)'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''write a sequence of lines to ``path``'''
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''append ``data`` to ``path``'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The methods below delegate to os/util equivalents, translating the
    # relative path through self.join() first.

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''create a temp file under the vfs; return (fd, relative name)'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        # report the name relative to the vfs root, matching the 'dir'
        # argument, rather than the absolute path mkstemp produced
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)
333 333
334 334 class vfs(abstractvfs):
335 335 '''Operate files relative to a base directory
336 336
337 337 This class is used to hide the details of COW semantics and
338 338 remote file access from higher level code.
339 339 '''
340 340 def __init__(self, base, audit=True, expandpath=False, realpath=False):
341 341 if expandpath:
342 342 base = util.expandpath(base)
343 343 if realpath:
344 344 base = os.path.realpath(base)
345 345 self.base = base
346 346 self._setmustaudit(audit)
347 347 self.createmode = None
348 348 self._trustnlink = None
349 349
350 350 def _getmustaudit(self):
351 351 return self._audit
352 352
353 353 def _setmustaudit(self, onoff):
354 354 self._audit = onoff
355 355 if onoff:
356 356 self.audit = pathutil.pathauditor(self.base)
357 357 else:
358 358 self.audit = util.always
359 359
360 360 mustaudit = property(_getmustaudit, _setmustaudit)
361 361
362 362 @util.propertycache
363 363 def _cansymlink(self):
364 364 return util.checklink(self.base)
365 365
366 366 @util.propertycache
367 367 def _chmod(self):
368 368 return util.checkexec(self.base)
369 369
370 370 def _fixfilemode(self, name):
371 371 if self.createmode is None or not self._chmod:
372 372 return
373 373 os.chmod(name, self.createmode & 0666)
374 374
375 375 def __call__(self, path, mode="r", text=False, atomictemp=False,
376 376 notindexed=False):
377 377 '''Open ``path`` file, which is relative to vfs root.
378 378
379 379 Newly created directories are marked as "not to be indexed by
380 380 the content indexing service", if ``notindexed`` is specified
381 381 for "write" mode access.
382 382 '''
383 383 if self._audit:
384 384 r = util.checkosfilename(path)
385 385 if r:
386 386 raise util.Abort("%s: %r" % (r, path))
387 387 self.audit(path)
388 388 f = self.join(path)
389 389
390 390 if not text and "b" not in mode:
391 391 mode += "b" # for that other OS
392 392
393 393 nlink = -1
394 394 if mode not in ('r', 'rb'):
395 395 dirname, basename = util.split(f)
396 396 # If basename is empty, then the path is malformed because it points
397 397 # to a directory. Let the posixfile() call below raise IOError.
398 398 if basename:
399 399 if atomictemp:
400 400 util.ensuredirs(dirname, self.createmode, notindexed)
401 401 return util.atomictempfile(f, mode, self.createmode)
402 402 try:
403 403 if 'w' in mode:
404 404 util.unlink(f)
405 405 nlink = 0
406 406 else:
407 407 # nlinks() may behave differently for files on Windows
408 408 # shares if the file is open.
409 409 fd = util.posixfile(f)
410 410 nlink = util.nlinks(f)
411 411 if nlink < 1:
412 412 nlink = 2 # force mktempcopy (issue1922)
413 413 fd.close()
414 414 except (OSError, IOError), e:
415 415 if e.errno != errno.ENOENT:
416 416 raise
417 417 nlink = 0
418 418 util.ensuredirs(dirname, self.createmode, notindexed)
419 419 if nlink > 0:
420 420 if self._trustnlink is None:
421 421 self._trustnlink = nlink > 1 or util.checknlink(f)
422 422 if nlink > 1 or not self._trustnlink:
423 423 util.rename(util.mktempcopy(f), f)
424 424 fp = util.posixfile(f, mode)
425 425 if nlink == 0:
426 426 self._fixfilemode(f)
427 427 return fp
428 428
429 429 def symlink(self, src, dst):
430 430 self.audit(dst)
431 431 linkname = self.join(dst)
432 432 try:
433 433 os.unlink(linkname)
434 434 except OSError:
435 435 pass
436 436
437 437 util.ensuredirs(os.path.dirname(linkname), self.createmode)
438 438
439 439 if self._cansymlink:
440 440 try:
441 441 os.symlink(src, linkname)
442 442 except OSError, err:
443 443 raise OSError(err.errno, _('could not symlink to %r: %s') %
444 444 (src, err.strerror), linkname)
445 445 else:
446 446 self.write(dst, src)
447 447
448 def join(self, path):
448 def join(self, path, *insidef):
449 449 if path:
450 return os.path.join(self.base, path)
450 return os.path.join(self.base, path, *insidef)
451 451 else:
452 452 return self.base
453 453
454 454 opener = vfs
455 455
class auditvfs(object):
    '''Mixin holding a wrapped vfs and forwarding its audit switch.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        wrapped = self.vfs
        return wrapped.mustaudit

    def _setmustaudit(self, onoff):
        wrapped = self.vfs
        wrapped.mustaudit = onoff

    # delegate the mustaudit flag straight through to the wrapped vfs
    mustaudit = property(_getmustaudit, _setmustaudit)
467 467
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.

    Every path is passed through ``filter`` before being handed to the
    wrapped vfs, both for opening and for path joining.
    '''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            # join the components relative first so the filter sees the
            # complete relative path
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

filteropener = filtervfs
485 485
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may reach the wrapped vfs
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
497 497
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # remember the stat of every visited directory so that symlink
        # cycles terminate; adddir returns True for a new directory
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without os.path.samestat we cannot safely follow symlinks
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link manually; os.walk
                        # itself would not follow it
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
545 545
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # bundled default.d/*.rc snippets come first, then system, then user
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path
558 558
# memoized result of rcpath(); computed on first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
584 584
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None (the working directory) maps to one past the highest revision
    return len(repo) if rev is None else rev
591 591
def revsingle(repo, revspec, default='.'):
    '''Resolve ``revspec`` to a single changectx, using ``default`` when
    the spec is empty.  Aborts on an empty resolved set; when the spec
    matches several revisions the last one wins.'''
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    return repo[l.last()]
600 600
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (first, second) pair of nodes.

    ``second`` is None when no revs are given (p1 of the working dir is
    returned) or when the specs name a single revision rather than a range.
    '''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single spec without a range separator denotes one revision,
    # not a degenerate range
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

# separator used by old-style "start:end" revision ranges
_revrangesep = ':'
628 628
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications.

    Each spec may be an int, an old-style "start:end" range, a single
    rev name, or a revset query; results are deduplicated across specs.
    """

    def revfix(repo, val, defval):
        # an empty range component falls back to the supplied default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), revset.baseset([])

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo)

    return l
702 702
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        # kind-prefixed patterns (e.g. 'glob:...') are passed through as-is
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # a glob that matched nothing is kept literally
        ret.append(kindpat)
    return ret
721 721
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.

    NOTE: the mutable default arguments are safe here: ``pats`` is only
    rebound (never mutated in place) and ``opts`` is only read via .get().
    '''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        # an always-matcher means the patterns were effectively unused
        pats = []
    return m, pats
738 738
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    # convenience wrapper that drops the patterns from matchandpats()
    return matchandpats(ctx, pats, opts, globbed, default)[0]
742 742
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
746 746
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
750 750
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add matched unknown files, forget matched missing ones, and record
    renames detected by similarity.  Recurses into subrepos.  Returns a
    nonzero value when a subrepo fails or an explicitly named file is
    rejected, 0 otherwise.'''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo matches if named exactly or if any pattern file
        # lies inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        # only report bad files the user named explicitly; collect all
        # of them so the return value can reflect the failure
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
813 813
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any of the given files was rejected by the matcher,
    0 otherwise.
    '''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
843 843
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns an (added, unknown, deleted, removed, forgotten) tuple of
    filename lists.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # dstate codes: '?' unknown, 'r' removed, 'a' added; st is falsy for
    # files missing on disk
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
872 872
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity is 0 or less.
    '''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
887 887
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-dir lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
900 900
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only just added; no copy data to record
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
919 919
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings; raises RequirementError on a
    corrupt file or on requirements this Mercurial does not support.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            # entries must begin with an alphanumeric character
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
938 938
class filecachesubentry(object):
    """Stat-based change detector for a single file path.

    Used by the filecache machinery to decide whether a cached value
    must be recomputed because its backing file changed on disk.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # record fresh stat data only when stat info is usable for caching
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        '''return True when the file may have changed since last refresh'''
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) for a missing file rather than raising
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
993 993
class filecacheentry(object):
    """A bundle of filecachesubentry objects, one per watched path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1010 1010
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths whose stat info guards the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name,
        # then return self so this instance acts as the descriptor
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        """Return the cached value, recomputing it if a watched file changed.

        Invariant: self.name in obj.__dict__ implies an entry in
        obj._filecache for the same name.
        """
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        """Replace the cached value directly, keeping _filecache in sync."""
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            # stat=False: don't stat now; the caller supplies the value
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the __dict__ copy; the _filecache entry is kept so the
        # next __get__ can compare stat info instead of starting over
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1086 1086
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> reference count (see addpath/delpath)
        self._dirs = {}
        add = self.addpath  # hoisted: called once per file
        if util.safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: ignore entries whose state matches skip
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    add(fname)
        else:
            for fname in map:
                add(fname)

    def addpath(self, path):
        """Count path's ancestors: bump the deepest already-known ancestor
        and stop (its own ancestors were counted when it first appeared);
        otherwise seed new ancestors with a count of 1."""
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        """Reverse of addpath: decrement the deepest multiply-referenced
        ancestor and stop; delete ancestors whose count drops from 1."""
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        # iterating a dict yields its keys lazily, same as iterkeys()
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
1122 1122
# Prefer the implementation from the parsers module when it provides one
# (presumably the C-accelerated version -- verify against parsers build).
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
1125 1125
def finddirs(path):
    """Yield each ancestor directory of a '/'-separated path, deepest
    first (e.g. 'a/b/c' -> 'a/b', then 'a'). Yields nothing for a bare
    filename."""
    remainder = path
    while True:
        head, sep, _tail = remainder.rpartition('/')
        if not sep:
            return
        yield head
        remainder = head
General Comments 0
You need to be logged in to leave comments. Login now