##// END OF EJS Templates
# Changeset header (from the repository browser view):
#   vfs: add basename
#   Author: FUJIWARA Katsunori
#   Revision: r25770:39de2e9c (default branch)
#   Diff hunk: @@ -1,1156 +1,1162 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev, wdirrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    meaningful for the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath, ctx) mapping preferring subpaths from ctx1. The
    # subpaths from ctx2 matter when .hgsub was modified (in ctx2) but is
    # not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths present only in ctx2 are handled separately below
    missing = set(s for s in ctx2.substate if s not in ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
121 121
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not usable as a new label (branch/bookmark/tag).'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in ':\0\n\r':
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the whole label parsed as an integer: would clash with rev numbers
        raise util.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\n' in f or '\r' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts; elsewhere only when explicitly configured
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    '''Warn or abort when a new filename case-folds onto a tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # Remember filenames already audited so that calling this object
        # twice with the same name does not report a bogus collision.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if f not in self._dirstate and fl in self._loweredfiles:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189 189
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
213 213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only "file does not exist" is tolerated; any other I/O
            # error is a real problem and must propagate
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # cache the bound __call__ on the instance so later open() calls
        # skip this method entirely
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # return the whole (binary) content of ``path``
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        # replace the content of ``path`` with (binary) ``data``
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            # report the temp file path relative to the vfs root, not the
            # absolute path tempfile returned
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove itself
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
413 419
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: the directory all relative paths resolve against
        # audit: when True, every path goes through pathutil.pathauditor
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # mode applied to newly created files; None means "leave as-is"
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a freshly created file, but only when the
        # filesystem honors mode bits
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the hardlink count of the target file:
        #   -1: unknown / read-only open, 0: file will be newly created
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks (copy-on-write) before modifying
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            # replace any pre-existing link/file at the destination
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target in a regular file
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
533 539
opener = vfs  # historical alias kept for backwards compatibility
535 541
class auditvfs(object):
    '''Base for wrapper vfs classes: forwards ``mustaudit`` to the
    wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        # delegate to the wrapped vfs
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
547 553
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # filter: callable mapping a relative path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
563 569
filteropener = filtervfs  # historical alias kept for backwards compatibility
565 571
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
577 583
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only a failure on the top-level path itself is fatal; errors
        # deeper in the walk are silently skipped
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # return True (and record dirname's stat) if this directory
            # was not seen before; used to break symlink cycles
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles, so refuse
        # to follow symlinks at all
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink with shared cycle state
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            # mutate dirs in place so os.walk only descends unseen dirs
            dirs[:] = newdirs
625 631
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
638 644
# module-level cache for rcpath(); computed once per process
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    env = os.environ.get('HGRCPATH')
    if env is None:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in env.split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
664 670
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working directory is represented by None; map it to wdirrev
    return wdirrev if rev is None else rev
671 677
def revsingle(repo, revspec, default='.'):
    '''Return the single changectx named by ``revspec`` (or ``default``).'''
    # an empty spec (but not the revision 0) falls back to the default
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
680 686
def revpair(repo, revs):
    '''Resolve ``revs`` to a (node, node-or-None) pair.'''
    if not revs:
        # no spec at all: first working-directory parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    first = second = None
    if l:
        if l.isascending():
            first, second = l.min(), l.max()
        elif l.isdescending():
            first, second = l.max(), l.min()
        else:
            first, second = l.first(), l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single rev spelled without ':' yields (node, None)
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
706 712
# separator for old-style "start:end" revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty component of a range (but not rev 0) means the default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                # either endpoint naming an alias forces revset parsing
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                # spanset's end is exclusive, so widen by one in the
                # direction of the range
                if start < end:
                    l = revset.spanset(repo, start, end + 1)
                else:
                    l = revset.spanset(repo, start, end - 1)
                subsets.append(l)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                subsets.append(revset.baseset([rev]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))

    return revset._combinesets(subsets)
763 769
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # glob matched nothing: keep the original pattern
            ret.append(kindpat)
    return ret
782 788
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    # avoid the mutable-default-argument pitfall ([] / {} are shared
    # across calls); normalize to fresh/immutable values instead
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default bad-file callback: warn via the repo ui
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # an always-matcher means the patterns were effectively unused
    if m.always():
        pats = []
    return m, pats
805 811
def match(ctx, pats=(), opts=None, globbed=False, default='relpath', badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # avoid mutable default arguments; normalize before delegating
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
809 815
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
813 819
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
817 823
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    # Schedule missing files for removal and unknown files for addition,
    # recursing into matching subrepos; returns 1 if any named file was
    # rejected, else 0.
    # NOTE(review): mutable default opts={} is shared across calls; it is
    # only read here (.get) and passed along, so this is currently safe.
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is addremoved if named exactly or if any pattern
        # points inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only complain about files the user named explicitly, but record
        # every rejection for the exit-status check below
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
879 885
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # The lambda captures 'rejected' by name, so binding the list after
    # creating the matcher is fine (late-binding closure).
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected file that was explicitly named means failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
908 914
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # dstate is the one-letter dirstate status code; st is the stat result
    # (falsy when the file is absent from disk). The branch order matters:
    # e.g. a '?' entry that fails the audit and has no stat falls through
    # to 'deleted'.
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
937 943
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        exact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not exact:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
952 958
def _markchanges(repo, unknown, deleted, renames):
    '''Record changes in the dirstate: files in unknown are marked added,
    files in deleted are marked removed, and each dst in renames is marked
    as copied from its src.'''
    workingctx = repo[None]
    wlock = repo.wlock()
    try:
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
    finally:
        wlock.release()
965 971
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    dirstate = repo.dirstate
    origsrc = dirstate.copied(src) or src
    if dst == origsrc:
        # copying back a copy? just make sure dst is tracked again
        if dirstate[dst] not in 'mn' and not dryrun:
            dirstate.normallookup(dst)
        return
    if dirstate[origsrc] == 'a' and origsrc == src:
        # source was only added, never committed: no copy data to record
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
984 990
def readrequires(opener, supported):
    '''Read and parse .hg/requires, verifying every entry against the set
    of supported features.  Returns the set of requirement strings; raises
    RequirementError on a corrupt file or an unsupported requirement.'''
    requirements = set(opener.read("requires").splitlines())
    for entry in requirements:
        if entry not in supported and (not entry or not entry[0].isalnum()):
            # a non-alphanumeric first character means the file is garbage
            raise error.RequirementError(_(".hg/requires file is corrupt"))
    missings = sorted(r for r in requirements if r not in supported)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1003 1009
def writerequires(opener, requirements):
    """Write the requirements, sorted and one per line, via opener.

    'opener' is a vfs-like callable; opener("requires", "w") must return a
    writable file object.  The file is closed even if a write fails, so the
    descriptor is not leaked.
    """
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        reqfile.close()
1009 1015
class filecachesubentry(object):
    '''Tracks stat information for a single path, so callers can detect
    when the file changed on disk.  A None cachestat means the file was
    missing (or never statted); a None _cacheable means we do not yet know
    whether the filesystem can reliably detect replacement.'''

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means "unknown yet"; filled in lazily from the first stat
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        '''Re-stat the path, unless it is known to be uncacheable.'''
        if not self.cacheable():
            return
        self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is None:
            # unknown yet: optimistically assume it is, until proven not
            return True
        return self._cacheable

    def changed(self):
        '''True if the file differs from the recorded stat info (or if it
        cannot be cached at all, in which case we must always assume a
        change).'''
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # the first stat may have failed; settle cacheability now if we can
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        '''util.cachestat for path, or None if the file does not exist.'''
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                return None
            raise
1064 1070
class filecacheentry(object):
    '''A bundle of filecachesubentry objects, one per tracked path.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1081 1087
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (under .hg/) whose stat info invalidates the cache
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and the
        # attribute name it will be cached under
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # fast path: a value in obj.__dict__ shadows this descriptor for
        # later lookups until it is deleted or invalidated
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # cached before: recompute only if some tracked file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # explicit assignment replaces the cached value without recomputing
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            # stat=False: no stat info yet, so the next read will recompute
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop the shadowing instance attribute so the next access goes
        # through __get__ again; _filecache keeps its stat entry
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now