##// END OF EJS Templates
vfs: extract 'vfs' class and related code to a new 'vfs' module (API)...
Pierre-Yves David -
r31217:0f31830f default
parent child Browse files
Show More
This diff has been collapsed as it changes many lines, (627 lines changed) Show them Hide them
@@ -1,1572 +1,967 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 import contextlib
11 10 import errno
12 11 import glob
13 12 import hashlib
14 13 import os
15 14 import re
16 import shutil
17 15 import socket
18 import stat
19 import tempfile
20 import threading
21 16
22 17 from .i18n import _
23 18 from .node import wdirrev
24 19 from . import (
25 20 encoding,
26 21 error,
27 22 match as matchmod,
28 23 osutil,
29 24 pathutil,
30 25 phases,
31 26 pycompat,
32 27 revsetlang,
33 28 similar,
34 29 util,
30 vfs as vfsmod,
35 31 )
36 32
37 33 if pycompat.osname == 'nt':
38 34 from . import scmwindows as scmplatform
39 35 else:
40 36 from . import scmposix as scmplatform
41 37
42 38 systemrcpath = scmplatform.systemrcpath
43 39 userrcpath = scmplatform.userrcpath
44 40 termsize = scmplatform.termsize
45 41
46 42 class status(tuple):
47 43 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
48 44 and 'ignored' properties are only relevant to the working copy.
49 45 '''
50 46
51 47 __slots__ = ()
52 48
53 49 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
54 50 clean):
55 51 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
56 52 ignored, clean))
57 53
58 54 @property
59 55 def modified(self):
60 56 '''files that have been modified'''
61 57 return self[0]
62 58
63 59 @property
64 60 def added(self):
65 61 '''files that have been added'''
66 62 return self[1]
67 63
68 64 @property
69 65 def removed(self):
70 66 '''files that have been removed'''
71 67 return self[2]
72 68
73 69 @property
74 70 def deleted(self):
75 71 '''files that are in the dirstate, but have been deleted from the
76 72 working copy (aka "missing")
77 73 '''
78 74 return self[3]
79 75
80 76 @property
81 77 def unknown(self):
82 78 '''files not in the dirstate that are not ignored'''
83 79 return self[4]
84 80
85 81 @property
86 82 def ignored(self):
87 83 '''files not in the dirstate that are ignored (by _dirignore())'''
88 84 return self[5]
89 85
90 86 @property
91 87 def clean(self):
92 88 '''files that have not been modified'''
93 89 return self[6]
94 90
95 91 def __repr__(self, *args, **kwargs):
96 92 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
97 93 'unknown=%r, ignored=%r, clean=%r>') % self)
98 94
99 95 def itersubrepos(ctx1, ctx2):
100 96 """find subrepos in ctx1 or ctx2"""
101 97 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 98 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 99 # has been modified (in ctx2) but not yet committed (in ctx1).
104 100 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 101 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106 102
107 103 missing = set()
108 104
109 105 for subpath in ctx2.substate:
110 106 if subpath not in ctx1.substate:
111 107 del subpaths[subpath]
112 108 missing.add(subpath)
113 109
114 110 for subpath, ctx in sorted(subpaths.iteritems()):
115 111 yield subpath, ctx.sub(subpath)
116 112
117 113 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 114 # status and diff will have an accurate result when it does
119 115 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 116 # against itself.
121 117 for subpath in missing:
122 118 yield subpath, ctx2.nullsub(subpath, ctx1)
123 119
124 120 def nochangesfound(ui, repo, excluded=None):
125 121 '''Report no changes for push/pull, excluded is None or a list of
126 122 nodes excluded from the push/pull.
127 123 '''
128 124 secretlist = []
129 125 if excluded:
130 126 for n in excluded:
131 127 if n not in repo:
132 128 # discovery should not have included the filtered revision,
133 129 # we have to explicitly exclude it until discovery is cleaned up.
134 130 continue
135 131 ctx = repo[n]
136 132 if ctx.phase() >= phases.secret and not ctx.extinct():
137 133 secretlist.append(n)
138 134
139 135 if secretlist:
140 136 ui.status(_("no changes found (ignored %d secret changesets)\n")
141 137 % len(secretlist))
142 138 else:
143 139 ui.status(_("no changes found\n"))
144 140
145 141 def callcatch(ui, func):
146 142 """call func() with global exception handling
147 143
148 144 return func() if no exception happens. otherwise do some error handling
149 145 and return an exit code accordingly. does not handle all exceptions.
150 146 """
151 147 try:
152 148 return func()
153 149 # Global exception handling, alphabetically
154 150 # Mercurial-specific first, followed by built-in and library exceptions
155 151 except error.LockHeld as inst:
156 152 if inst.errno == errno.ETIMEDOUT:
157 153 reason = _('timed out waiting for lock held by %s') % inst.locker
158 154 else:
159 155 reason = _('lock held by %s') % inst.locker
160 156 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
161 157 except error.LockUnavailable as inst:
162 158 ui.warn(_("abort: could not lock %s: %s\n") %
163 159 (inst.desc or inst.filename, inst.strerror))
164 160 except error.OutOfBandError as inst:
165 161 if inst.args:
166 162 msg = _("abort: remote error:\n")
167 163 else:
168 164 msg = _("abort: remote error\n")
169 165 ui.warn(msg)
170 166 if inst.args:
171 167 ui.warn(''.join(inst.args))
172 168 if inst.hint:
173 169 ui.warn('(%s)\n' % inst.hint)
174 170 except error.RepoError as inst:
175 171 ui.warn(_("abort: %s!\n") % inst)
176 172 if inst.hint:
177 173 ui.warn(_("(%s)\n") % inst.hint)
178 174 except error.ResponseError as inst:
179 175 ui.warn(_("abort: %s") % inst.args[0])
180 176 if not isinstance(inst.args[1], basestring):
181 177 ui.warn(" %r\n" % (inst.args[1],))
182 178 elif not inst.args[1]:
183 179 ui.warn(_(" empty string\n"))
184 180 else:
185 181 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
186 182 except error.CensoredNodeError as inst:
187 183 ui.warn(_("abort: file censored %s!\n") % inst)
188 184 except error.RevlogError as inst:
189 185 ui.warn(_("abort: %s!\n") % inst)
190 186 except error.SignalInterrupt:
191 187 ui.warn(_("killed!\n"))
192 188 except error.InterventionRequired as inst:
193 189 ui.warn("%s\n" % inst)
194 190 if inst.hint:
195 191 ui.warn(_("(%s)\n") % inst.hint)
196 192 return 1
197 193 except error.Abort as inst:
198 194 ui.warn(_("abort: %s\n") % inst)
199 195 if inst.hint:
200 196 ui.warn(_("(%s)\n") % inst.hint)
201 197 except ImportError as inst:
202 198 ui.warn(_("abort: %s!\n") % inst)
203 199 m = str(inst).split()[-1]
204 200 if m in "mpatch bdiff".split():
205 201 ui.warn(_("(did you forget to compile extensions?)\n"))
206 202 elif m in "zlib".split():
207 203 ui.warn(_("(is your Python install correct?)\n"))
208 204 except IOError as inst:
209 205 if util.safehasattr(inst, "code"):
210 206 ui.warn(_("abort: %s\n") % inst)
211 207 elif util.safehasattr(inst, "reason"):
212 208 try: # usually it is in the form (errno, strerror)
213 209 reason = inst.reason.args[1]
214 210 except (AttributeError, IndexError):
215 211 # it might be anything, for example a string
216 212 reason = inst.reason
217 213 if isinstance(reason, unicode):
218 214 # SSLError of Python 2.7.9 contains a unicode
219 215 reason = reason.encode(encoding.encoding, 'replace')
220 216 ui.warn(_("abort: error: %s\n") % reason)
221 217 elif (util.safehasattr(inst, "args")
222 218 and inst.args and inst.args[0] == errno.EPIPE):
223 219 pass
224 220 elif getattr(inst, "strerror", None):
225 221 if getattr(inst, "filename", None):
226 222 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
227 223 else:
228 224 ui.warn(_("abort: %s\n") % inst.strerror)
229 225 else:
230 226 raise
231 227 except OSError as inst:
232 228 if getattr(inst, "filename", None) is not None:
233 229 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
234 230 else:
235 231 ui.warn(_("abort: %s\n") % inst.strerror)
236 232 except MemoryError:
237 233 ui.warn(_("abort: out of memory\n"))
238 234 except SystemExit as inst:
239 235 # Commands shouldn't sys.exit directly, but give a return code.
240 236 # Just in case catch this and pass exit code to caller.
241 237 return inst.code
242 238 except socket.error as inst:
243 239 ui.warn(_("abort: %s\n") % inst.args[-1])
244 240
245 241 return -1
246 242
247 243 def checknewlabel(repo, lbl, kind):
248 244 # Do not use the "kind" parameter in ui output.
249 245 # It makes strings difficult to translate.
250 246 if lbl in ['tip', '.', 'null']:
251 247 raise error.Abort(_("the name '%s' is reserved") % lbl)
252 248 for c in (':', '\0', '\n', '\r'):
253 249 if c in lbl:
254 250 raise error.Abort(_("%r cannot be used in a name") % c)
255 251 try:
256 252 int(lbl)
257 253 raise error.Abort(_("cannot use an integer as a name"))
258 254 except ValueError:
259 255 pass
260 256
261 257 def checkfilename(f):
262 258 '''Check that the filename f is an acceptable filename for a tracked file'''
263 259 if '\r' in f or '\n' in f:
264 260 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
265 261
266 262 def checkportable(ui, f):
267 263 '''Check if filename f is portable and warn or abort depending on config'''
268 264 checkfilename(f)
269 265 abort, warn = checkportabilityalert(ui)
270 266 if abort or warn:
271 267 msg = util.checkwinfilename(f)
272 268 if msg:
273 269 msg = "%s: %r" % (msg, f)
274 270 if abort:
275 271 raise error.Abort(msg)
276 272 ui.warn(_("warning: %s\n") % msg)
277 273
278 274 def checkportabilityalert(ui):
279 275 '''check if the user's config requests nothing, a warning, or abort for
280 276 non-portable filenames'''
281 277 val = ui.config('ui', 'portablefilenames', 'warn')
282 278 lval = val.lower()
283 279 bval = util.parsebool(val)
284 280 abort = pycompat.osname == 'nt' or lval == 'abort'
285 281 warn = bval or lval == 'warn'
286 282 if bval is None and not (warn or abort or lval == 'ignore'):
287 283 raise error.ConfigError(
288 284 _("ui.portablefilenames value is invalid ('%s')") % val)
289 285 return abort, warn
290 286
291 287 class casecollisionauditor(object):
292 288 def __init__(self, ui, abort, dirstate):
293 289 self._ui = ui
294 290 self._abort = abort
295 291 allfiles = '\0'.join(dirstate._map)
296 292 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
297 293 self._dirstate = dirstate
298 294 # The purpose of _newfiles is so that we don't complain about
299 295 # case collisions if someone were to call this object with the
300 296 # same filename twice.
301 297 self._newfiles = set()
302 298
303 299 def __call__(self, f):
304 300 if f in self._newfiles:
305 301 return
306 302 fl = encoding.lower(f)
307 303 if fl in self._loweredfiles and f not in self._dirstate:
308 304 msg = _('possible case-folding collision for %s') % f
309 305 if self._abort:
310 306 raise error.Abort(msg)
311 307 self._ui.warn(_("warning: %s\n") % msg)
312 308 self._loweredfiles.add(fl)
313 309 self._newfiles.add(f)
314 310
315 311 def filteredhash(repo, maxrev):
316 312 """build hash of filtered revisions in the current repoview.
317 313
318 314 Multiple caches perform up-to-date validation by checking that the
319 315 tiprev and tipnode stored in the cache file match the current repository.
320 316 However, this is not sufficient for validating repoviews because the set
321 317 of revisions in the view may change without the repository tiprev and
322 318 tipnode changing.
323 319
324 320 This function hashes all the revs filtered from the view and returns
325 321 that SHA-1 digest.
326 322 """
327 323 cl = repo.changelog
328 324 if not cl.filteredrevs:
329 325 return None
330 326 key = None
331 327 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
332 328 if revs:
333 329 s = hashlib.sha1()
334 330 for rev in revs:
335 331 s.update('%s;' % rev)
336 332 key = s.digest()
337 333 return key
338 334
339 class abstractvfs(object):
340 """Abstract base class; cannot be instantiated"""
341
342 def __init__(self, *args, **kwargs):
343 '''Prevent instantiation; don't call this from subclasses.'''
344 raise NotImplementedError('attempted instantiating ' + str(type(self)))
345
346 def tryread(self, path):
347 '''gracefully return an empty string for missing files'''
348 try:
349 return self.read(path)
350 except IOError as inst:
351 if inst.errno != errno.ENOENT:
352 raise
353 return ""
354
355 def tryreadlines(self, path, mode='rb'):
356 '''gracefully return an empty array for missing files'''
357 try:
358 return self.readlines(path, mode=mode)
359 except IOError as inst:
360 if inst.errno != errno.ENOENT:
361 raise
362 return []
363
364 @util.propertycache
365 def open(self):
366 '''Open ``path`` file, which is relative to vfs root.
367
368 Newly created directories are marked as "not to be indexed by
369 the content indexing service", if ``notindexed`` is specified
370 for "write" mode access.
371 '''
372 return self.__call__
373
374 def read(self, path):
375 with self(path, 'rb') as fp:
376 return fp.read()
377
378 def readlines(self, path, mode='rb'):
379 with self(path, mode=mode) as fp:
380 return fp.readlines()
381
382 def write(self, path, data, backgroundclose=False):
383 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
384 return fp.write(data)
385
386 def writelines(self, path, data, mode='wb', notindexed=False):
387 with self(path, mode=mode, notindexed=notindexed) as fp:
388 return fp.writelines(data)
389
390 def append(self, path, data):
391 with self(path, 'ab') as fp:
392 return fp.write(data)
393
394 def basename(self, path):
395 """return base element of a path (as os.path.basename would do)
396
397 This exists to allow handling of strange encoding if needed."""
398 return os.path.basename(path)
399
400 def chmod(self, path, mode):
401 return os.chmod(self.join(path), mode)
402
403 def dirname(self, path):
404 """return dirname element of a path (as os.path.dirname would do)
405
406 This exists to allow handling of strange encoding if needed."""
407 return os.path.dirname(path)
408
409 def exists(self, path=None):
410 return os.path.exists(self.join(path))
411
412 def fstat(self, fp):
413 return util.fstat(fp)
414
415 def isdir(self, path=None):
416 return os.path.isdir(self.join(path))
417
418 def isfile(self, path=None):
419 return os.path.isfile(self.join(path))
420
421 def islink(self, path=None):
422 return os.path.islink(self.join(path))
423
424 def isfileorlink(self, path=None):
425 '''return whether path is a regular file or a symlink
426
427 Unlike isfile, this doesn't follow symlinks.'''
428 try:
429 st = self.lstat(path)
430 except OSError:
431 return False
432 mode = st.st_mode
433 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
434
435 def reljoin(self, *paths):
436 """join various elements of a path together (as os.path.join would do)
437
438 The vfs base is not injected so that path stay relative. This exists
439 to allow handling of strange encoding if needed."""
440 return os.path.join(*paths)
441
442 def split(self, path):
443 """split top-most element of a path (as os.path.split would do)
444
445 This exists to allow handling of strange encoding if needed."""
446 return os.path.split(path)
447
448 def lexists(self, path=None):
449 return os.path.lexists(self.join(path))
450
451 def lstat(self, path=None):
452 return os.lstat(self.join(path))
453
454 def listdir(self, path=None):
455 return os.listdir(self.join(path))
456
457 def makedir(self, path=None, notindexed=True):
458 return util.makedir(self.join(path), notindexed)
459
460 def makedirs(self, path=None, mode=None):
461 return util.makedirs(self.join(path), mode)
462
463 def makelock(self, info, path):
464 return util.makelock(info, self.join(path))
465
466 def mkdir(self, path=None):
467 return os.mkdir(self.join(path))
468
469 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
470 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
471 dir=self.join(dir), text=text)
472 dname, fname = util.split(name)
473 if dir:
474 return fd, os.path.join(dir, fname)
475 else:
476 return fd, fname
477
478 def readdir(self, path=None, stat=None, skip=None):
479 return osutil.listdir(self.join(path), stat, skip)
480
481 def readlock(self, path):
482 return util.readlock(self.join(path))
483
484 def rename(self, src, dst, checkambig=False):
485 """Rename from src to dst
486
487 checkambig argument is used with util.filestat, and is useful
488 only if destination file is guarded by any lock
489 (e.g. repo.lock or repo.wlock).
490 """
491 dstpath = self.join(dst)
492 oldstat = checkambig and util.filestat(dstpath)
493 if oldstat and oldstat.stat:
494 ret = util.rename(self.join(src), dstpath)
495 newstat = util.filestat(dstpath)
496 if newstat.isambig(oldstat):
497 # stat of renamed file is ambiguous to original one
498 newstat.avoidambig(dstpath, oldstat)
499 return ret
500 return util.rename(self.join(src), dstpath)
501
502 def readlink(self, path):
503 return os.readlink(self.join(path))
504
505 def removedirs(self, path=None):
506 """Remove a leaf directory and all empty intermediate ones
507 """
508 return util.removedirs(self.join(path))
509
510 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
511 """Remove a directory tree recursively
512
513 If ``forcibly``, this tries to remove READ-ONLY files, too.
514 """
515 if forcibly:
516 def onerror(function, path, excinfo):
517 if function is not os.remove:
518 raise
519 # read-only files cannot be unlinked under Windows
520 s = os.stat(path)
521 if (s.st_mode & stat.S_IWRITE) != 0:
522 raise
523 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
524 os.remove(path)
525 else:
526 onerror = None
527 return shutil.rmtree(self.join(path),
528 ignore_errors=ignore_errors, onerror=onerror)
529
530 def setflags(self, path, l, x):
531 return util.setflags(self.join(path), l, x)
532
533 def stat(self, path=None):
534 return os.stat(self.join(path))
535
536 def unlink(self, path=None):
537 return util.unlink(self.join(path))
538
539 def unlinkpath(self, path=None, ignoremissing=False):
540 return util.unlinkpath(self.join(path), ignoremissing)
541
542 def utime(self, path=None, t=None):
543 return os.utime(self.join(path), t)
544
545 def walk(self, path=None, onerror=None):
546 """Yield (dirpath, dirs, files) tuple for each directories under path
547
548 ``dirpath`` is relative one from the root of this vfs. This
549 uses ``os.sep`` as path separator, even you specify POSIX
550 style ``path``.
551
552 "The root of this vfs" is represented as empty ``dirpath``.
553 """
554 root = os.path.normpath(self.join(None))
555 # when dirpath == root, dirpath[prefixlen:] becomes empty
556 # because len(dirpath) < prefixlen.
557 prefixlen = len(pathutil.normasprefix(root))
558 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
559 yield (dirpath[prefixlen:], dirs, files)
560
561 @contextlib.contextmanager
562 def backgroundclosing(self, ui, expectedcount=-1):
563 """Allow files to be closed asynchronously.
564
565 When this context manager is active, ``backgroundclose`` can be passed
566 to ``__call__``/``open`` to result in the file possibly being closed
567 asynchronously, on a background thread.
568 """
569 # This is an arbitrary restriction and could be changed if we ever
570 # have a use case.
571 vfs = getattr(self, 'vfs', self)
572 if getattr(vfs, '_backgroundfilecloser', None):
573 raise error.Abort(
574 _('can only have 1 active background file closer'))
575
576 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
577 try:
578 vfs._backgroundfilecloser = bfc
579 yield bfc
580 finally:
581 vfs._backgroundfilecloser = None
582
583 class vfs(abstractvfs):
584 '''Operate files relative to a base directory
585
586 This class is used to hide the details of COW semantics and
587 remote file access from higher level code.
588 '''
589 def __init__(self, base, audit=True, expandpath=False, realpath=False):
590 if expandpath:
591 base = util.expandpath(base)
592 if realpath:
593 base = os.path.realpath(base)
594 self.base = base
595 self.mustaudit = audit
596 self.createmode = None
597 self._trustnlink = None
598
599 @property
600 def mustaudit(self):
601 return self._audit
602
603 @mustaudit.setter
604 def mustaudit(self, onoff):
605 self._audit = onoff
606 if onoff:
607 self.audit = pathutil.pathauditor(self.base)
608 else:
609 self.audit = util.always
610
611 @util.propertycache
612 def _cansymlink(self):
613 return util.checklink(self.base)
614
615 @util.propertycache
616 def _chmod(self):
617 return util.checkexec(self.base)
618
619 def _fixfilemode(self, name):
620 if self.createmode is None or not self._chmod:
621 return
622 os.chmod(name, self.createmode & 0o666)
623
624 def __call__(self, path, mode="r", text=False, atomictemp=False,
625 notindexed=False, backgroundclose=False, checkambig=False):
626 '''Open ``path`` file, which is relative to vfs root.
627
628 Newly created directories are marked as "not to be indexed by
629 the content indexing service", if ``notindexed`` is specified
630 for "write" mode access.
631
632 If ``backgroundclose`` is passed, the file may be closed asynchronously.
633 It can only be used if the ``self.backgroundclosing()`` context manager
634 is active. This should only be specified if the following criteria hold:
635
636 1. There is a potential for writing thousands of files. Unless you
637 are writing thousands of files, the performance benefits of
638 asynchronously closing files is not realized.
639 2. Files are opened exactly once for the ``backgroundclosing``
640 active duration and are therefore free of race conditions between
641 closing a file on a background thread and reopening it. (If the
642 file were opened multiple times, there could be unflushed data
643 because the original file handle hasn't been flushed/closed yet.)
644
645 ``checkambig`` argument is passed to atomictemplfile (valid
646 only for writing), and is useful only if target file is
647 guarded by any lock (e.g. repo.lock or repo.wlock).
648 '''
649 if self._audit:
650 r = util.checkosfilename(path)
651 if r:
652 raise error.Abort("%s: %r" % (r, path))
653 self.audit(path)
654 f = self.join(path)
655
656 if not text and "b" not in mode:
657 mode += "b" # for that other OS
658
659 nlink = -1
660 if mode not in ('r', 'rb'):
661 dirname, basename = util.split(f)
662 # If basename is empty, then the path is malformed because it points
663 # to a directory. Let the posixfile() call below raise IOError.
664 if basename:
665 if atomictemp:
666 util.makedirs(dirname, self.createmode, notindexed)
667 return util.atomictempfile(f, mode, self.createmode,
668 checkambig=checkambig)
669 try:
670 if 'w' in mode:
671 util.unlink(f)
672 nlink = 0
673 else:
674 # nlinks() may behave differently for files on Windows
675 # shares if the file is open.
676 with util.posixfile(f):
677 nlink = util.nlinks(f)
678 if nlink < 1:
679 nlink = 2 # force mktempcopy (issue1922)
680 except (OSError, IOError) as e:
681 if e.errno != errno.ENOENT:
682 raise
683 nlink = 0
684 util.makedirs(dirname, self.createmode, notindexed)
685 if nlink > 0:
686 if self._trustnlink is None:
687 self._trustnlink = nlink > 1 or util.checknlink(f)
688 if nlink > 1 or not self._trustnlink:
689 util.rename(util.mktempcopy(f), f)
690 fp = util.posixfile(f, mode)
691 if nlink == 0:
692 self._fixfilemode(f)
693
694 if checkambig:
695 if mode in ('r', 'rb'):
696 raise error.Abort(_('implementation error: mode %s is not'
697 ' valid for checkambig=True') % mode)
698 fp = checkambigatclosing(fp)
699
700 if backgroundclose:
701 if not self._backgroundfilecloser:
702 raise error.Abort(_('backgroundclose can only be used when a '
703 'backgroundclosing context manager is active')
704 )
705
706 fp = delayclosedfile(fp, self._backgroundfilecloser)
707
708 return fp
709
710 def symlink(self, src, dst):
711 self.audit(dst)
712 linkname = self.join(dst)
713 try:
714 os.unlink(linkname)
715 except OSError:
716 pass
717
718 util.makedirs(os.path.dirname(linkname), self.createmode)
719
720 if self._cansymlink:
721 try:
722 os.symlink(src, linkname)
723 except OSError as err:
724 raise OSError(err.errno, _('could not symlink to %r: %s') %
725 (src, err.strerror), linkname)
726 else:
727 self.write(dst, src)
728
729 def join(self, path, *insidef):
730 if path:
731 return os.path.join(self.base, path, *insidef)
732 else:
733 return self.base
734
735 opener = vfs
736
737 class auditvfs(object):
738 def __init__(self, vfs):
739 self.vfs = vfs
740
741 @property
742 def mustaudit(self):
743 return self.vfs.mustaudit
744
745 @mustaudit.setter
746 def mustaudit(self, onoff):
747 self.vfs.mustaudit = onoff
748
749 @property
750 def options(self):
751 return self.vfs.options
752
753 @options.setter
754 def options(self, value):
755 self.vfs.options = value
756
757 class filtervfs(abstractvfs, auditvfs):
758 '''Wrapper vfs for filtering filenames with a function.'''
759
760 def __init__(self, vfs, filter):
761 auditvfs.__init__(self, vfs)
762 self._filter = filter
763
764 def __call__(self, path, *args, **kwargs):
765 return self.vfs(self._filter(path), *args, **kwargs)
766
767 def join(self, path, *insidef):
768 if path:
769 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
770 else:
771 return self.vfs.join(path)
772
773 filteropener = filtervfs
774
775 class readonlyvfs(abstractvfs, auditvfs):
776 '''Wrapper vfs preventing any writing.'''
777
778 def __init__(self, vfs):
779 auditvfs.__init__(self, vfs)
780
781 def __call__(self, path, mode='r', *args, **kw):
782 if mode not in ('r', 'rb'):
783 raise error.Abort(_('this vfs is read only'))
784 return self.vfs(path, mode, *args, **kw)
785
786 def join(self, path, *insidef):
787 return self.vfs.join(path, *insidef)
335 # compatibility layer since all 'vfs' code moved to 'mercurial.vfs'
336 #
337 # It is hard to install a deprecation warning here since we do not have
338 # access to a 'ui' object.
339 opener = vfs = vfsmod.vfs
340 filteropener = filtervfs = vfsmod.filtervfs
341 abstractvfs = vfsmod.abstractvfs
342 readonlyvfs = vfsmod.readonlyvfs
343 auditvfs = vfsmod.auditvfs
344 checkambigatclosing = vfsmod.checkambigatclosing
788 345
789 346 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
790 347 '''yield every hg repository under path, always recursively.
791 348 The recurse flag will only control recursion into repo working dirs'''
792 349 def errhandler(err):
793 350 if err.filename == path:
794 351 raise err
795 352 samestat = getattr(os.path, 'samestat', None)
796 353 if followsym and samestat is not None:
797 354 def adddir(dirlst, dirname):
798 355 match = False
799 356 dirstat = os.stat(dirname)
800 357 for lstdirstat in dirlst:
801 358 if samestat(dirstat, lstdirstat):
802 359 match = True
803 360 break
804 361 if not match:
805 362 dirlst.append(dirstat)
806 363 return not match
807 364 else:
808 365 followsym = False
809 366
810 367 if (seen_dirs is None) and followsym:
811 368 seen_dirs = []
812 369 adddir(seen_dirs, path)
813 370 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
814 371 dirs.sort()
815 372 if '.hg' in dirs:
816 373 yield root # found a repository
817 374 qroot = os.path.join(root, '.hg', 'patches')
818 375 if os.path.isdir(os.path.join(qroot, '.hg')):
819 376 yield qroot # we have a patch queue repo here
820 377 if recurse:
821 378 # avoid recursing inside the .hg directory
822 379 dirs.remove('.hg')
823 380 else:
824 381 dirs[:] = [] # don't descend further
825 382 elif followsym:
826 383 newdirs = []
827 384 for d in dirs:
828 385 fname = os.path.join(root, d)
829 386 if adddir(seen_dirs, fname):
830 387 if os.path.islink(fname):
831 388 for hgname in walkrepos(fname, True, seen_dirs):
832 389 yield hgname
833 390 else:
834 391 newdirs.append(d)
835 392 dirs[:] = newdirs
836 393
837 394 def osrcpath():
838 395 '''return default os-specific hgrc search path'''
839 396 path = []
840 397 defaultpath = os.path.join(util.datapath, 'default.d')
841 398 if os.path.isdir(defaultpath):
842 399 for f, kind in osutil.listdir(defaultpath):
843 400 if f.endswith('.rc'):
844 401 path.append(os.path.join(defaultpath, f))
845 402 path.extend(systemrcpath())
846 403 path.extend(userrcpath())
847 404 path = [os.path.normpath(f) for f in path]
848 405 return path
849 406
850 407 _rcpath = None
851 408
852 409 def rcpath():
853 410 '''return hgrc search path. if env var HGRCPATH is set, use it.
854 411 for each item in path, if directory, use files ending in .rc,
855 412 else use item.
856 413 make HGRCPATH empty to only look in .hg/hgrc of current repo.
857 414 if no HGRCPATH, use default os-specific path.'''
858 415 global _rcpath
859 416 if _rcpath is None:
860 417 if 'HGRCPATH' in encoding.environ:
861 418 _rcpath = []
862 419 for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
863 420 if not p:
864 421 continue
865 422 p = util.expandpath(p)
866 423 if os.path.isdir(p):
867 424 for f, kind in osutil.listdir(p):
868 425 if f.endswith('.rc'):
869 426 _rcpath.append(os.path.join(p, f))
870 427 else:
871 428 _rcpath.append(p)
872 429 else:
873 430 _rcpath = osrcpath()
874 431 return _rcpath
875 432
876 433 def intrev(rev):
877 434 """Return integer for a given revision that can be used in comparison or
878 435 arithmetic operation"""
879 436 if rev is None:
880 437 return wdirrev
881 438 return rev
882 439
883 440 def revsingle(repo, revspec, default='.'):
884 441 if not revspec and revspec != 0:
885 442 return repo[default]
886 443
887 444 l = revrange(repo, [revspec])
888 445 if not l:
889 446 raise error.Abort(_('empty revision set'))
890 447 return repo[l.last()]
891 448
892 449 def _pairspec(revspec):
893 450 tree = revsetlang.parse(revspec)
894 451 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
895 452
896 453 def revpair(repo, revs):
897 454 if not revs:
898 455 return repo.dirstate.p1(), None
899 456
900 457 l = revrange(repo, revs)
901 458
902 459 if not l:
903 460 first = second = None
904 461 elif l.isascending():
905 462 first = l.min()
906 463 second = l.max()
907 464 elif l.isdescending():
908 465 first = l.max()
909 466 second = l.min()
910 467 else:
911 468 first = l.first()
912 469 second = l.last()
913 470
914 471 if first is None:
915 472 raise error.Abort(_('empty revision range'))
916 473 if (first == second and len(revs) >= 2
917 474 and not all(revrange(repo, [r]) for r in revs)):
918 475 raise error.Abort(_('empty revision on one side of range'))
919 476
920 477 # if top-level is range expression, the result must always be a pair
921 478 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
922 479 return repo.lookup(first), None
923 480
924 481 return repo.lookup(first), repo.lookup(second)
925 482
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases.  The revsets in
    ``specs`` are combined with a chained ``OR``; an empty ``specs`` yields
    an empty result.

    Integer entries are treated as revision numbers.  Everything else is
    assumed to be an already-formatted revset (see
    ``revsetlang.formatspec()`` for argument expansion).

    Returns a ``revset.abstractsmartset``, a list-like interface over
    integer revisions.
    """
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
953 510
def meaningfulparents(repo, ctx):
    """Return the parent revisions worth displaying for *ctx*.

    Merges (two non-null parents) report both parents.  In debug mode the
    sole parent is reported together with the null revision.  A single
    parent is otherwise only meaningful when it is not simply the
    immediately preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        # linear history: the parent is implied, show nothing
        return []
    return parents
969 526
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by the shell.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        # a glob that matches nothing falls back to the literal pattern
        expanded.extend(matches or [kindpat])
    return expanded
988 545
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Build a matcher from user-supplied patterns and options.

    Returns a (matcher, patterns) pair where *patterns* is the possibly
    glob-expanded pattern list actually used.  Unless an alternate *badfn*
    callback is provided, bad matches are reported as warnings on the
    repository ui.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def warnbad(f, msg):
        # 'm' is bound below; the matcher only invokes this after creation
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = warnbad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
1013 570
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
1018 575
def matchall(repo):
    '''Return a matcher that efficiently matches every file in the repo.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
1022 579
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that efficiently matches exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
1026 583
def origpath(ui, repo, filepath):
    '''Return the path where the backup (.orig) of *filepath* is created.

    Honours the user-defined [ui] origbackuppath configuration and falls
    back to *filepath* + ".orig" when it is not set.  Missing backup
    directories are created on demand.
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
1046 603
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files, remove missing ones and record renames in one pass.

    Walks the dirstate with *matcher* (recursing into subrepositories when
    requested via opts or an exact match), marks unknown files as added and
    missing files as removed, and records rename candidates found by
    _findrenames at the given *similarity* threshold.  *prefix* is joined
    onto subrepo paths in user messages.

    Returns 1 if any explicitly named file was rejected (or a subrepo
    reported a failure), else 0.  With dry_run, nothing is recorded.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # recurse into subrepos first, propagating their failure as our own
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only report the error for files the user named explicitly;
        # every bad file is collected and filtered again below
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # tell the user which files will be added (unknown/forgotten) and
    # which will be removed (deleted)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1102 659
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Marks unknown/forgotten files as added, missing files as removed, and
    records renames above the *similarity* threshold.  Returns 1 if any of
    the named files could not be processed, else 0.
    '''
    # note: the badfn lambda closes over 'rejected', which is bound on the
    # next line; the matcher only calls badfn later, once walking starts
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # pair up removed files with added files that look alike
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            # an explicitly named file could not be processed
            return 1
    return 0
1131 688
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of path lists:
    (added, unknown, deleted, removed, forgotten).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # 'st' is the walk's stat result; falsy means the file is absent on disk
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and accepted by the path auditor
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1160 717
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping each new name to the old name it was most
    likely renamed from.  An empty dict is returned when *similarity*
    is not positive.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        bothexact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not bothexact:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1175 732
def _markchanges(repo, unknown, deleted, renames):
    '''Record pending changes in the dirstate: files in *unknown* become
    added, files in *deleted* become removed, and entries of *renames*
    (a new-name -> old-name mapping) become copies.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for newname, oldname in renames.iteritems():
            wctx.copy(oldname, newname)
1185 742
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain back to its true origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # undo the copy marker: put dst back into a plain tracked state
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only added in the working copy: there is no
            # committed revision to record copy data from
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1204 761
def readrequires(opener, supported):
    '''Read and parse .hg/requires via *opener* and verify every entry
    against the *supported* feature set.

    Returns the set of requirements on success.  Raises
    error.RequirementError when the file is corrupt or lists features
    unknown to this Mercurial.'''
    requirements = set(opener.read("requires").splitlines())
    unknown = []
    for requirement in requirements:
        if requirement in supported:
            continue
        if not requirement or not requirement[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unknown.append(requirement)
    if unknown:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(unknown)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1223 780
def writerequires(opener, requirements):
    '''Write the sorted *requirements*, one per line, to .hg/requires.'''
    with opener('requires', 'w') as fp:
        fp.write(''.join('%s\n' % r for r in sorted(requirements)))
1228 785
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    With stat=True the file is stat'ed immediately so later calls to
    changed() can compare against that snapshot.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True / False / None meaning "not known yet"
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-snapshot the file's stat info, if it is cacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is None:
            # unknown yet: optimistically assume it is cacheable
            return True
        return self._cacheable

    def changed(self):
        """Report whether the file changed since the last snapshot."""
        # uncacheable files always count as changed
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # cacheability may become known now; record it
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for *path*, or None if it is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1283 840
class filecacheentry(object):
    """Change tracker aggregating several filecachesubentry objects."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any tracked path has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1300 857
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    Decorated objects are expected to carry a '_filecache' dict attribute.
    '''
    def __init__(self, *paths):
        # relative file names whose stat info guards the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # on-disk state moved: recompute the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1379 936
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run *cmd* as a subprocess with *envvar* advertising *lock*, so the
    child process may inherit it.

    *lock* must currently be held; otherwise
    error.LockInheritanceContractViolation is raised.  Returns the
    subprocess exit code.  Note: *environ* (when given) is mutated to
    carry the locker token, matching historical behavior.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        result = repo.ui.system(cmd, environ=environ, *args, **kwargs)
    return result
1389 946
def wlocksub(repo, cmd, *args, **kwargs):
    """Run *cmd* as a subprocess that may inherit the repo's wlock.

    This can only be called while the wlock is held.  Accepts the same
    extra arguments as ui.system and returns the subprocess exit code."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1398 955
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    # (short-circuit preserved: the second knob is only read when needed)
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1405 962
def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1411
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Subclasses override __exit__ and close; every other attribute access
    is forwarded to the wrapped file handle.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # bypass our own __setattr__, which would otherwise forward the
        # assignment to the wrapped handle
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        # delegate every unknown attribute to the wrapped file handle
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # must be provided by subclasses
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        # must be provided by subclasses
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1437
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delegated to a closer.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # bypass the attribute forwarding installed by the base class
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    def close(self):
        # hand the wrapped handle to the closer instead of closing inline
        self._closer.close(self._origfh)
1452
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads.

    Must be used as a context manager; close() may only be called while
    the context is active.  Worker threads are only started when the
    feature is enabled and the expected file count justifies the overhead.
    """
    def __init__(self, ui, expectedcount=-1):
        self._running = False       # True while worker threads accept work
        self._entered = False       # True inside the 'with' block
        self._threads = []
        self._threadexception = None  # first exception seen in a worker

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = pycompat.osname == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so we notice shutdown promptly
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1543
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot the stat before any writes, to compare at close time;
        # bypass the attribute forwarding installed by the base class
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        # compare the pre-write snapshot against the current stat and
        # resolve any ambiguity (see util.filestat)
        oldstat = self._oldstat
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                newstat.avoidambig(self._origfh.name, oldstat)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
This diff has been collapsed as it changes many lines, (938 lines changed) Show them Hide them
@@ -1,1572 +1,636 b''
1 # scmutil.py - Mercurial core utility functions
1 # vfs.py - Mercurial 'vfs' classes
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7
8 7 from __future__ import absolute_import
9 8
10 9 import contextlib
11 10 import errno
12 import glob
13 import hashlib
14 11 import os
15 import re
16 12 import shutil
17 import socket
18 13 import stat
19 14 import tempfile
20 15 import threading
21 16
22 17 from .i18n import _
23 from .node import wdirrev
24 18 from . import (
25 encoding,
26 19 error,
27 match as matchmod,
28 20 osutil,
29 21 pathutil,
30 phases,
31 22 pycompat,
32 revsetlang,
33 similar,
34 23 util,
35 24 )
36 25
37 if pycompat.osname == 'nt':
38 from . import scmwindows as scmplatform
39 else:
40 from . import scmposix as scmplatform
41
42 systemrcpath = scmplatform.systemrcpath
43 userrcpath = scmplatform.userrcpath
44 termsize = scmplatform.termsize
45
46 class status(tuple):
47 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
48 and 'ignored' properties are only relevant to the working copy.
49 '''
50
51 __slots__ = ()
52
53 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
54 clean):
55 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
56 ignored, clean))
57
58 @property
59 def modified(self):
60 '''files that have been modified'''
61 return self[0]
62
63 @property
64 def added(self):
65 '''files that have been added'''
66 return self[1]
67
68 @property
69 def removed(self):
70 '''files that have been removed'''
71 return self[2]
72
73 @property
74 def deleted(self):
75 '''files that are in the dirstate, but have been deleted from the
76 working copy (aka "missing")
77 '''
78 return self[3]
79
80 @property
81 def unknown(self):
82 '''files not in the dirstate that are not ignored'''
83 return self[4]
84
85 @property
86 def ignored(self):
87 '''files not in the dirstate that are ignored (by _dirignore())'''
88 return self[5]
89
90 @property
91 def clean(self):
92 '''files that have not been modified'''
93 return self[6]
94
95 def __repr__(self, *args, **kwargs):
96 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
97 'unknown=%r, ignored=%r, clean=%r>') % self)
98
99 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
107 missing = set()
108
109 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
111 del subpaths[subpath]
112 missing.add(subpath)
113
114 for subpath, ctx in sorted(subpaths.iteritems()):
115 yield subpath, ctx.sub(subpath)
116
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
121 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
123
124 def nochangesfound(ui, repo, excluded=None):
125 '''Report no changes for push/pull, excluded is None or a list of
126 nodes excluded from the push/pull.
127 '''
128 secretlist = []
129 if excluded:
130 for n in excluded:
131 if n not in repo:
132 # discovery should not have included the filtered revision,
133 # we have to explicitly exclude it until discovery is cleanup.
134 continue
135 ctx = repo[n]
136 if ctx.phase() >= phases.secret and not ctx.extinct():
137 secretlist.append(n)
138
139 if secretlist:
140 ui.status(_("no changes found (ignored %d secret changesets)\n")
141 % len(secretlist))
142 else:
143 ui.status(_("no changes found\n"))
144
145 def callcatch(ui, func):
146 """call func() with global exception handling
147
148 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
150 """
151 try:
152 return func()
153 # Global exception handling, alphabetically
154 # Mercurial-specific first, followed by built-in and library exceptions
155 except error.LockHeld as inst:
156 if inst.errno == errno.ETIMEDOUT:
157 reason = _('timed out waiting for lock held by %s') % inst.locker
158 else:
159 reason = _('lock held by %s') % inst.locker
160 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
161 except error.LockUnavailable as inst:
162 ui.warn(_("abort: could not lock %s: %s\n") %
163 (inst.desc or inst.filename, inst.strerror))
164 except error.OutOfBandError as inst:
165 if inst.args:
166 msg = _("abort: remote error:\n")
167 else:
168 msg = _("abort: remote error\n")
169 ui.warn(msg)
170 if inst.args:
171 ui.warn(''.join(inst.args))
172 if inst.hint:
173 ui.warn('(%s)\n' % inst.hint)
174 except error.RepoError as inst:
175 ui.warn(_("abort: %s!\n") % inst)
176 if inst.hint:
177 ui.warn(_("(%s)\n") % inst.hint)
178 except error.ResponseError as inst:
179 ui.warn(_("abort: %s") % inst.args[0])
180 if not isinstance(inst.args[1], basestring):
181 ui.warn(" %r\n" % (inst.args[1],))
182 elif not inst.args[1]:
183 ui.warn(_(" empty string\n"))
184 else:
185 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
186 except error.CensoredNodeError as inst:
187 ui.warn(_("abort: file censored %s!\n") % inst)
188 except error.RevlogError as inst:
189 ui.warn(_("abort: %s!\n") % inst)
190 except error.SignalInterrupt:
191 ui.warn(_("killed!\n"))
192 except error.InterventionRequired as inst:
193 ui.warn("%s\n" % inst)
194 if inst.hint:
195 ui.warn(_("(%s)\n") % inst.hint)
196 return 1
197 except error.Abort as inst:
198 ui.warn(_("abort: %s\n") % inst)
199 if inst.hint:
200 ui.warn(_("(%s)\n") % inst.hint)
201 except ImportError as inst:
202 ui.warn(_("abort: %s!\n") % inst)
203 m = str(inst).split()[-1]
204 if m in "mpatch bdiff".split():
205 ui.warn(_("(did you forget to compile extensions?)\n"))
206 elif m in "zlib".split():
207 ui.warn(_("(is your Python install correct?)\n"))
208 except IOError as inst:
209 if util.safehasattr(inst, "code"):
210 ui.warn(_("abort: %s\n") % inst)
211 elif util.safehasattr(inst, "reason"):
212 try: # usually it is in the form (errno, strerror)
213 reason = inst.reason.args[1]
214 except (AttributeError, IndexError):
215 # it might be anything, for example a string
216 reason = inst.reason
217 if isinstance(reason, unicode):
218 # SSLError of Python 2.7.9 contains a unicode
219 reason = reason.encode(encoding.encoding, 'replace')
220 ui.warn(_("abort: error: %s\n") % reason)
221 elif (util.safehasattr(inst, "args")
222 and inst.args and inst.args[0] == errno.EPIPE):
223 pass
224 elif getattr(inst, "strerror", None):
225 if getattr(inst, "filename", None):
226 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
227 else:
228 ui.warn(_("abort: %s\n") % inst.strerror)
229 else:
230 raise
231 except OSError as inst:
232 if getattr(inst, "filename", None) is not None:
233 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
234 else:
235 ui.warn(_("abort: %s\n") % inst.strerror)
236 except MemoryError:
237 ui.warn(_("abort: out of memory\n"))
238 except SystemExit as inst:
239 # Commands shouldn't sys.exit directly, but give a return code.
240 # Just in case catch this and and pass exit code to caller.
241 return inst.code
242 except socket.error as inst:
243 ui.warn(_("abort: %s\n") % inst.args[-1])
244
245 return -1
246
247 def checknewlabel(repo, lbl, kind):
248 # Do not use the "kind" parameter in ui output.
249 # It makes strings difficult to translate.
250 if lbl in ['tip', '.', 'null']:
251 raise error.Abort(_("the name '%s' is reserved") % lbl)
252 for c in (':', '\0', '\n', '\r'):
253 if c in lbl:
254 raise error.Abort(_("%r cannot be used in a name") % c)
255 try:
256 int(lbl)
257 raise error.Abort(_("cannot use an integer as a name"))
258 except ValueError:
259 pass
260
261 def checkfilename(f):
262 '''Check that the filename f is an acceptable filename for a tracked file'''
263 if '\r' in f or '\n' in f:
264 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
265
266 def checkportable(ui, f):
267 '''Check if filename f is portable and warn or abort depending on config'''
268 checkfilename(f)
269 abort, warn = checkportabilityalert(ui)
270 if abort or warn:
271 msg = util.checkwinfilename(f)
272 if msg:
273 msg = "%s: %r" % (msg, f)
274 if abort:
275 raise error.Abort(msg)
276 ui.warn(_("warning: %s\n") % msg)
277
278 def checkportabilityalert(ui):
279 '''check if the user's config requests nothing, a warning, or abort for
280 non-portable filenames'''
281 val = ui.config('ui', 'portablefilenames', 'warn')
282 lval = val.lower()
283 bval = util.parsebool(val)
284 abort = pycompat.osname == 'nt' or lval == 'abort'
285 warn = bval or lval == 'warn'
286 if bval is None and not (warn or abort or lval == 'ignore'):
287 raise error.ConfigError(
288 _("ui.portablefilenames value is invalid ('%s')") % val)
289 return abort, warn
290
class casecollisionauditor(object):
    '''Detect case-folding collisions among filenames being added.

    Seeded with the lowercased names of every file currently in the
    dirstate; each call with a new filename checks it against that set
    and either warns or aborts (per ``abort``) on a collision.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # when True, a collision raises error.Abort instead of warning
        self._abort = abort
        # join/split through '\0' lowercases all names in one call,
        # which is cheaper than lowercasing each name individually
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        '''Check filename ``f``; warn or abort on a case-folding collision.'''
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a lowercase match only counts as a collision if ``f`` itself is
        # not already tracked (same file re-added is not a collision)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
314
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
338
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Provides path-manipulation and filesystem helpers on top of a single
    primitive — ``__call__(path, mode, ...)`` returning a file object —
    and a ``join(path)`` that maps a vfs-relative path to a real one.
    Concrete subclasses supply both.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only ENOENT is swallowed; other I/O errors still propagate
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # alias for calling the vfs object itself; cached as a property
        # so attribute lookup is as cheap as a method reference
        return self.__call__

    def read(self, path):
        '''Return the entire binary content of ``path``.'''
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        '''Return the content of ``path`` as a list of lines.'''
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        '''Write ``data`` to ``path``, replacing any previous content.'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence of strings ``data`` to ``path``.'''
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        '''Append ``data`` to ``path``, creating it if missing.'''
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a unique temporary file under the vfs.

        Returns ``(fd, name)``.  tempfile.mkstemp returns an absolute
        path; when ``dir`` is given the name is rewritten back to a
        vfs-relative path so callers can reopen it through the vfs.
        '''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                newstat.avoidambig(dstpath, oldstat)
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove itself
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always clear the marker so a later context can start
                vfs._backgroundfilecloser = None
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory against which all relative paths are resolved
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        # tri-state: None = not yet probed, then True/False (see __call__)
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        # toggling also swaps the audit callable between a real path
        # auditor and a no-op
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        # probed once per instance: whether the filesystem supports symlinks
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # probed once per instance: whether the filesystem honors exec bits
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode (masked to rw bits) to a newly created file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks (copy-on-write) before modifying
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            # _backgroundfilecloser is set by abstractvfs.backgroundclosing()
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        '''Create a symlink ``dst`` pointing at ``src``.

        Falls back to writing ``src`` as the file content when the
        filesystem does not support symlinks.
        '''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        # empty/None path refers to the vfs root itself
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

# historical alias kept for backward compatibility
opener = vfs
736 423
class auditvfs(object):
    '''Mixin delegating audit and option attributes to a wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, value):
        self.vfs.mustaudit = value

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
756 443
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the name through the filter before delegating
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            # empty path: delegate unchanged (vfs root)
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))

filteropener = filtervfs
774 461
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
788 475
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the starting path itself are fatal
        if err.filename == path:
            raise err
    # samestat is used to detect symlink cycles; when unavailable
    # (platform-dependent) symlink following is disabled entirely
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return False if an
            # equivalent directory (same inode) was already seen
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            # an mq patch queue is itself a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                # only descend into directories we haven't visited yet
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse manually through the symlink, sharing
                        # seen_dirs to keep cycle detection global
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
836
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        # pick up every *.rc shipped in the default.d directory
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
849
# module-level cache for rcpath(); computed lazily on first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in encoding.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            # directories contribute every *.rc file they contain
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
875
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None stands for the working directory; map it to its sentinel rev
    return wdirrev if rev is None else rev
882
def revsingle(repo, revspec, default='.'):
    '''Resolve ``revspec`` to a single changectx, or ``default`` when empty.'''
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
891
def _pairspec(revspec):
    # True-ish when the top-level operator of the parsed revspec is a
    # range variant, i.e. the user explicitly asked for a pair
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
895
def revpair(repo, revs):
    '''Resolve user-supplied revision specs into a (first, second) node pair.

    ``second`` is None when only one revision was effectively specified
    (and the spec was not an explicit range expression).
    '''
    if not revs:
        # no specs: working dir first parent vs. working dir
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        # unordered set: fall back to positional first/last
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # e.g. "-r a -r b" where one side resolved to nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
925
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are wrapped as rev(N) revset expressions
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
953
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    preceding = intrev(ctx.rev()) - 1
    if parents[0].rev() >= preceding:
        return []
    return parents
969
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # no match: keep the original pattern untouched
            expanded.append(kindpat)
    return expanded
988
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: closes over ``m`` which is bound below; safe because the
        # matcher only invokes badfn after construction
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means the patterns were effectively empty
        pats = []
    return m, pats
1013
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
1018
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
1022
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
1026
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the backup directory
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, relpath)

    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"
1046
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, recursing into subrepos.

    Returns 1 if anything failed (a rejected file or a failing subrepo
    addremove), 0 otherwise.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    # prefix is prepended for ui output of subrepo paths
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                # a missing subrepo is skipped, not fatal
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only explicitly-named files count as rejections
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1102
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any explicitly-named file was rejected by the matcher,
    0 otherwise.
    '''
    # Bind ``rejected`` before building the matcher: the badfn lambda
    # closes over it, and relying on the name being defined only *after*
    # matcher creation works solely because the matcher stores the
    # callback without calling it. Defining it first removes that
    # fragile ordering dependency.
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1131
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of
    repo-relative paths.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # dirstate state letters: '?' untracked, 'r' removed, 'a' added;
    # ``st`` is the stat result (falsy when the file is missing on disk)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1160
1161 def _findrenames(repo, matcher, added, removed, similarity):
1162 '''Find renames from removed files to added ones.'''
1163 renames = {}
1164 if similarity > 0:
1165 for old, new, score in similar.findrenames(repo, added, removed,
1166 similarity):
1167 if (repo.ui.verbose or not matcher.exact(old)
1168 or not matcher.exact(new)):
1169 repo.ui.status(_('recording removal of %s as rename to %s '
1170 '(%d%% similar)\n') %
1171 (matcher.rel(old), matcher.rel(new),
1172 score * 100))
1173 renames[new] = old
1174 return renames
1175
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-copy lock
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
1185
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm' = merged, 'n' = normal; anything else needs normallookup
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # copying an added (never committed) file records no copy data
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            # '?' = untracked, 'r' = removed: dst must be (re-)added
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1204
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirements; raises RequirementError when the
    file is corrupt or lists features this Mercurial does not know.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1223
def writerequires(opener, requirements):
    '''Write the sorted requirements to .hg/requires, one per line.'''
    with opener('requires', 'w') as fp:
        fp.write(''.join('%s\n' % r for r in sorted(requirements)))
1228
class filecachesubentry(object):
    '''Track one path's stat info to detect changes between calls.

    ``_cacheable`` is a tri-state: True/False once known, None while we
    cannot tell yet (e.g. the file does not exist).
    '''
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only when the filesystem can reliably signal changes
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        '''Return util.cachestat for ``path``, or None if it is missing.'''
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1283
class filecacheentry(object):
    """Aggregates ``filecachesubentry`` objects for a group of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits on the first changed entry, like the
        # explicit loop it replaces
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1300
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).
    '''

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped getter and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # accessed on the class itself: hand back the descriptor
        if obj is None:
            return self

        name = self.name
        # fast path: value already materialized on the instance
        if name in obj.__dict__:
            assert name in obj._filecache, name
            return obj.__dict__[name]

        entry = obj._filecache.get(name)
        if entry is not None:
            # recompute only when some tracked file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]
            # We stat -before- creating the object so our cache doesn't lie
            # if a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[name] = entry
        obj.__dict__[name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        name = self.name
        ce = obj._filecache.get(name)
        if ce is None:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[name] = ce

        ce.obj = value  # update cached copy
        obj.__dict__[name] = value  # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1379
1380 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1381 if lock is None:
1382 raise error.LockInheritanceContractViolation(
1383 'lock can only be inherited while held')
1384 if environ is None:
1385 environ = {}
1386 with lock.inherit() as locker:
1387 environ[envvar] = locker
1388 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1389
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1398
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1405
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1411
class closewrapbase(object):
    """Base class for proxies that intercept closing of a file handle.

    Every attribute access other than the hooks below is forwarded to
    the wrapped handle.  Subclasses must implement __exit__ and close.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # bypass our own __setattr__, which forwards to the wrapped handle
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, name):
        """Forward attribute reads to the wrapped handle."""
        return getattr(self._origfh, name)

    def __setattr__(self, name, value):
        """Forward attribute writes to the wrapped handle."""
        return setattr(self._origfh, name, value)

    def __delattr__(self, name):
        """Forward attribute deletion to the wrapped handle."""
        return delattr(self._origfh, name)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1437 501
class delayclosedfile(closewrapbase):
    """Proxy handing the wrapped file to a background closer on close.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # context-manager exit defers the close just like an explicit one
        self.close()

    def close(self):
        self._closer.close(self._origfh)
1452 516
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # ui: configuration/debug output source
        # expectedcount: number of files the caller expects to close;
        # -1 means unknown
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = pycompat.osname == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        # close() refuses to run unless the context manager is active
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                # queue drained; exit only once the manager has shut down
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1543 607
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, avoiding ambiguity of file stat at close.

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock).

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot the stat now so it can be compared at close time
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        old = self._oldstat
        if not old.stat:
            return
        new = util.filestat(self._origfh.name)
        if new.isambig(old):
            # stat of changed file is ambiguous to original one
            new.avoidambig(self._origfh.name, old)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now