##// END OF EJS Templates
vfs: extract 'vfs' class and related code to a new 'vfs' module (API)...
Pierre-Yves David -
r31217:0f31830f default
parent child Browse files
Show More
This diff has been collapsed as it changes many lines, (627 lines changed) Show them Hide them
@@ -1,1572 +1,967 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
11 import errno
10 import errno
12 import glob
11 import glob
13 import hashlib
12 import hashlib
14 import os
13 import os
15 import re
14 import re
16 import shutil
17 import socket
15 import socket
18 import stat
19 import tempfile
20 import threading
21
16
22 from .i18n import _
17 from .i18n import _
23 from .node import wdirrev
18 from .node import wdirrev
24 from . import (
19 from . import (
25 encoding,
20 encoding,
26 error,
21 error,
27 match as matchmod,
22 match as matchmod,
28 osutil,
23 osutil,
29 pathutil,
24 pathutil,
30 phases,
25 phases,
31 pycompat,
26 pycompat,
32 revsetlang,
27 revsetlang,
33 similar,
28 similar,
34 util,
29 util,
30 vfs as vfsmod,
35 )
31 )
36
32
37 if pycompat.osname == 'nt':
33 if pycompat.osname == 'nt':
38 from . import scmwindows as scmplatform
34 from . import scmwindows as scmplatform
39 else:
35 else:
40 from . import scmposix as scmplatform
36 from . import scmposix as scmplatform
41
37
42 systemrcpath = scmplatform.systemrcpath
38 systemrcpath = scmplatform.systemrcpath
43 userrcpath = scmplatform.userrcpath
39 userrcpath = scmplatform.userrcpath
44 termsize = scmplatform.termsize
40 termsize = scmplatform.termsize
45
41
46 class status(tuple):
42 class status(tuple):
47 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
43 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
48 and 'ignored' properties are only relevant to the working copy.
44 and 'ignored' properties are only relevant to the working copy.
49 '''
45 '''
50
46
51 __slots__ = ()
47 __slots__ = ()
52
48
53 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
49 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
54 clean):
50 clean):
55 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
51 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
56 ignored, clean))
52 ignored, clean))
57
53
58 @property
54 @property
59 def modified(self):
55 def modified(self):
60 '''files that have been modified'''
56 '''files that have been modified'''
61 return self[0]
57 return self[0]
62
58
63 @property
59 @property
64 def added(self):
60 def added(self):
65 '''files that have been added'''
61 '''files that have been added'''
66 return self[1]
62 return self[1]
67
63
68 @property
64 @property
69 def removed(self):
65 def removed(self):
70 '''files that have been removed'''
66 '''files that have been removed'''
71 return self[2]
67 return self[2]
72
68
73 @property
69 @property
74 def deleted(self):
70 def deleted(self):
75 '''files that are in the dirstate, but have been deleted from the
71 '''files that are in the dirstate, but have been deleted from the
76 working copy (aka "missing")
72 working copy (aka "missing")
77 '''
73 '''
78 return self[3]
74 return self[3]
79
75
80 @property
76 @property
81 def unknown(self):
77 def unknown(self):
82 '''files not in the dirstate that are not ignored'''
78 '''files not in the dirstate that are not ignored'''
83 return self[4]
79 return self[4]
84
80
85 @property
81 @property
86 def ignored(self):
82 def ignored(self):
87 '''files not in the dirstate that are ignored (by _dirignore())'''
83 '''files not in the dirstate that are ignored (by _dirignore())'''
88 return self[5]
84 return self[5]
89
85
90 @property
86 @property
91 def clean(self):
87 def clean(self):
92 '''files that have not been modified'''
88 '''files that have not been modified'''
93 return self[6]
89 return self[6]
94
90
95 def __repr__(self, *args, **kwargs):
91 def __repr__(self, *args, **kwargs):
96 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
92 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
97 'unknown=%r, ignored=%r, clean=%r>') % self)
93 'unknown=%r, ignored=%r, clean=%r>') % self)
98
94
99 def itersubrepos(ctx1, ctx2):
95 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
96 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
97 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
98 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
99 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
100 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
101 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
102
107 missing = set()
103 missing = set()
108
104
109 for subpath in ctx2.substate:
105 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
106 if subpath not in ctx1.substate:
111 del subpaths[subpath]
107 del subpaths[subpath]
112 missing.add(subpath)
108 missing.add(subpath)
113
109
114 for subpath, ctx in sorted(subpaths.iteritems()):
110 for subpath, ctx in sorted(subpaths.iteritems()):
115 yield subpath, ctx.sub(subpath)
111 yield subpath, ctx.sub(subpath)
116
112
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
113 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
114 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
115 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
116 # against itself.
121 for subpath in missing:
117 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
118 yield subpath, ctx2.nullsub(subpath, ctx1)
123
119
124 def nochangesfound(ui, repo, excluded=None):
120 def nochangesfound(ui, repo, excluded=None):
125 '''Report no changes for push/pull, excluded is None or a list of
121 '''Report no changes for push/pull, excluded is None or a list of
126 nodes excluded from the push/pull.
122 nodes excluded from the push/pull.
127 '''
123 '''
128 secretlist = []
124 secretlist = []
129 if excluded:
125 if excluded:
130 for n in excluded:
126 for n in excluded:
131 if n not in repo:
127 if n not in repo:
132 # discovery should not have included the filtered revision,
128 # discovery should not have included the filtered revision,
133 # we have to explicitly exclude it until discovery is cleanup.
129 # we have to explicitly exclude it until discovery is cleanup.
134 continue
130 continue
135 ctx = repo[n]
131 ctx = repo[n]
136 if ctx.phase() >= phases.secret and not ctx.extinct():
132 if ctx.phase() >= phases.secret and not ctx.extinct():
137 secretlist.append(n)
133 secretlist.append(n)
138
134
139 if secretlist:
135 if secretlist:
140 ui.status(_("no changes found (ignored %d secret changesets)\n")
136 ui.status(_("no changes found (ignored %d secret changesets)\n")
141 % len(secretlist))
137 % len(secretlist))
142 else:
138 else:
143 ui.status(_("no changes found\n"))
139 ui.status(_("no changes found\n"))
144
140
145 def callcatch(ui, func):
141 def callcatch(ui, func):
146 """call func() with global exception handling
142 """call func() with global exception handling
147
143
148 return func() if no exception happens. otherwise do some error handling
144 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
145 and return an exit code accordingly. does not handle all exceptions.
150 """
146 """
151 try:
147 try:
152 return func()
148 return func()
153 # Global exception handling, alphabetically
149 # Global exception handling, alphabetically
154 # Mercurial-specific first, followed by built-in and library exceptions
150 # Mercurial-specific first, followed by built-in and library exceptions
155 except error.LockHeld as inst:
151 except error.LockHeld as inst:
156 if inst.errno == errno.ETIMEDOUT:
152 if inst.errno == errno.ETIMEDOUT:
157 reason = _('timed out waiting for lock held by %s') % inst.locker
153 reason = _('timed out waiting for lock held by %s') % inst.locker
158 else:
154 else:
159 reason = _('lock held by %s') % inst.locker
155 reason = _('lock held by %s') % inst.locker
160 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
156 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
161 except error.LockUnavailable as inst:
157 except error.LockUnavailable as inst:
162 ui.warn(_("abort: could not lock %s: %s\n") %
158 ui.warn(_("abort: could not lock %s: %s\n") %
163 (inst.desc or inst.filename, inst.strerror))
159 (inst.desc or inst.filename, inst.strerror))
164 except error.OutOfBandError as inst:
160 except error.OutOfBandError as inst:
165 if inst.args:
161 if inst.args:
166 msg = _("abort: remote error:\n")
162 msg = _("abort: remote error:\n")
167 else:
163 else:
168 msg = _("abort: remote error\n")
164 msg = _("abort: remote error\n")
169 ui.warn(msg)
165 ui.warn(msg)
170 if inst.args:
166 if inst.args:
171 ui.warn(''.join(inst.args))
167 ui.warn(''.join(inst.args))
172 if inst.hint:
168 if inst.hint:
173 ui.warn('(%s)\n' % inst.hint)
169 ui.warn('(%s)\n' % inst.hint)
174 except error.RepoError as inst:
170 except error.RepoError as inst:
175 ui.warn(_("abort: %s!\n") % inst)
171 ui.warn(_("abort: %s!\n") % inst)
176 if inst.hint:
172 if inst.hint:
177 ui.warn(_("(%s)\n") % inst.hint)
173 ui.warn(_("(%s)\n") % inst.hint)
178 except error.ResponseError as inst:
174 except error.ResponseError as inst:
179 ui.warn(_("abort: %s") % inst.args[0])
175 ui.warn(_("abort: %s") % inst.args[0])
180 if not isinstance(inst.args[1], basestring):
176 if not isinstance(inst.args[1], basestring):
181 ui.warn(" %r\n" % (inst.args[1],))
177 ui.warn(" %r\n" % (inst.args[1],))
182 elif not inst.args[1]:
178 elif not inst.args[1]:
183 ui.warn(_(" empty string\n"))
179 ui.warn(_(" empty string\n"))
184 else:
180 else:
185 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
181 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
186 except error.CensoredNodeError as inst:
182 except error.CensoredNodeError as inst:
187 ui.warn(_("abort: file censored %s!\n") % inst)
183 ui.warn(_("abort: file censored %s!\n") % inst)
188 except error.RevlogError as inst:
184 except error.RevlogError as inst:
189 ui.warn(_("abort: %s!\n") % inst)
185 ui.warn(_("abort: %s!\n") % inst)
190 except error.SignalInterrupt:
186 except error.SignalInterrupt:
191 ui.warn(_("killed!\n"))
187 ui.warn(_("killed!\n"))
192 except error.InterventionRequired as inst:
188 except error.InterventionRequired as inst:
193 ui.warn("%s\n" % inst)
189 ui.warn("%s\n" % inst)
194 if inst.hint:
190 if inst.hint:
195 ui.warn(_("(%s)\n") % inst.hint)
191 ui.warn(_("(%s)\n") % inst.hint)
196 return 1
192 return 1
197 except error.Abort as inst:
193 except error.Abort as inst:
198 ui.warn(_("abort: %s\n") % inst)
194 ui.warn(_("abort: %s\n") % inst)
199 if inst.hint:
195 if inst.hint:
200 ui.warn(_("(%s)\n") % inst.hint)
196 ui.warn(_("(%s)\n") % inst.hint)
201 except ImportError as inst:
197 except ImportError as inst:
202 ui.warn(_("abort: %s!\n") % inst)
198 ui.warn(_("abort: %s!\n") % inst)
203 m = str(inst).split()[-1]
199 m = str(inst).split()[-1]
204 if m in "mpatch bdiff".split():
200 if m in "mpatch bdiff".split():
205 ui.warn(_("(did you forget to compile extensions?)\n"))
201 ui.warn(_("(did you forget to compile extensions?)\n"))
206 elif m in "zlib".split():
202 elif m in "zlib".split():
207 ui.warn(_("(is your Python install correct?)\n"))
203 ui.warn(_("(is your Python install correct?)\n"))
208 except IOError as inst:
204 except IOError as inst:
209 if util.safehasattr(inst, "code"):
205 if util.safehasattr(inst, "code"):
210 ui.warn(_("abort: %s\n") % inst)
206 ui.warn(_("abort: %s\n") % inst)
211 elif util.safehasattr(inst, "reason"):
207 elif util.safehasattr(inst, "reason"):
212 try: # usually it is in the form (errno, strerror)
208 try: # usually it is in the form (errno, strerror)
213 reason = inst.reason.args[1]
209 reason = inst.reason.args[1]
214 except (AttributeError, IndexError):
210 except (AttributeError, IndexError):
215 # it might be anything, for example a string
211 # it might be anything, for example a string
216 reason = inst.reason
212 reason = inst.reason
217 if isinstance(reason, unicode):
213 if isinstance(reason, unicode):
218 # SSLError of Python 2.7.9 contains a unicode
214 # SSLError of Python 2.7.9 contains a unicode
219 reason = reason.encode(encoding.encoding, 'replace')
215 reason = reason.encode(encoding.encoding, 'replace')
220 ui.warn(_("abort: error: %s\n") % reason)
216 ui.warn(_("abort: error: %s\n") % reason)
221 elif (util.safehasattr(inst, "args")
217 elif (util.safehasattr(inst, "args")
222 and inst.args and inst.args[0] == errno.EPIPE):
218 and inst.args and inst.args[0] == errno.EPIPE):
223 pass
219 pass
224 elif getattr(inst, "strerror", None):
220 elif getattr(inst, "strerror", None):
225 if getattr(inst, "filename", None):
221 if getattr(inst, "filename", None):
226 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
222 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
227 else:
223 else:
228 ui.warn(_("abort: %s\n") % inst.strerror)
224 ui.warn(_("abort: %s\n") % inst.strerror)
229 else:
225 else:
230 raise
226 raise
231 except OSError as inst:
227 except OSError as inst:
232 if getattr(inst, "filename", None) is not None:
228 if getattr(inst, "filename", None) is not None:
233 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
229 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
234 else:
230 else:
235 ui.warn(_("abort: %s\n") % inst.strerror)
231 ui.warn(_("abort: %s\n") % inst.strerror)
236 except MemoryError:
232 except MemoryError:
237 ui.warn(_("abort: out of memory\n"))
233 ui.warn(_("abort: out of memory\n"))
238 except SystemExit as inst:
234 except SystemExit as inst:
239 # Commands shouldn't sys.exit directly, but give a return code.
235 # Commands shouldn't sys.exit directly, but give a return code.
240 # Just in case catch this and and pass exit code to caller.
236 # Just in case catch this and and pass exit code to caller.
241 return inst.code
237 return inst.code
242 except socket.error as inst:
238 except socket.error as inst:
243 ui.warn(_("abort: %s\n") % inst.args[-1])
239 ui.warn(_("abort: %s\n") % inst.args[-1])
244
240
245 return -1
241 return -1
246
242
247 def checknewlabel(repo, lbl, kind):
243 def checknewlabel(repo, lbl, kind):
248 # Do not use the "kind" parameter in ui output.
244 # Do not use the "kind" parameter in ui output.
249 # It makes strings difficult to translate.
245 # It makes strings difficult to translate.
250 if lbl in ['tip', '.', 'null']:
246 if lbl in ['tip', '.', 'null']:
251 raise error.Abort(_("the name '%s' is reserved") % lbl)
247 raise error.Abort(_("the name '%s' is reserved") % lbl)
252 for c in (':', '\0', '\n', '\r'):
248 for c in (':', '\0', '\n', '\r'):
253 if c in lbl:
249 if c in lbl:
254 raise error.Abort(_("%r cannot be used in a name") % c)
250 raise error.Abort(_("%r cannot be used in a name") % c)
255 try:
251 try:
256 int(lbl)
252 int(lbl)
257 raise error.Abort(_("cannot use an integer as a name"))
253 raise error.Abort(_("cannot use an integer as a name"))
258 except ValueError:
254 except ValueError:
259 pass
255 pass
260
256
261 def checkfilename(f):
257 def checkfilename(f):
262 '''Check that the filename f is an acceptable filename for a tracked file'''
258 '''Check that the filename f is an acceptable filename for a tracked file'''
263 if '\r' in f or '\n' in f:
259 if '\r' in f or '\n' in f:
264 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
260 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
265
261
266 def checkportable(ui, f):
262 def checkportable(ui, f):
267 '''Check if filename f is portable and warn or abort depending on config'''
263 '''Check if filename f is portable and warn or abort depending on config'''
268 checkfilename(f)
264 checkfilename(f)
269 abort, warn = checkportabilityalert(ui)
265 abort, warn = checkportabilityalert(ui)
270 if abort or warn:
266 if abort or warn:
271 msg = util.checkwinfilename(f)
267 msg = util.checkwinfilename(f)
272 if msg:
268 if msg:
273 msg = "%s: %r" % (msg, f)
269 msg = "%s: %r" % (msg, f)
274 if abort:
270 if abort:
275 raise error.Abort(msg)
271 raise error.Abort(msg)
276 ui.warn(_("warning: %s\n") % msg)
272 ui.warn(_("warning: %s\n") % msg)
277
273
278 def checkportabilityalert(ui):
274 def checkportabilityalert(ui):
279 '''check if the user's config requests nothing, a warning, or abort for
275 '''check if the user's config requests nothing, a warning, or abort for
280 non-portable filenames'''
276 non-portable filenames'''
281 val = ui.config('ui', 'portablefilenames', 'warn')
277 val = ui.config('ui', 'portablefilenames', 'warn')
282 lval = val.lower()
278 lval = val.lower()
283 bval = util.parsebool(val)
279 bval = util.parsebool(val)
284 abort = pycompat.osname == 'nt' or lval == 'abort'
280 abort = pycompat.osname == 'nt' or lval == 'abort'
285 warn = bval or lval == 'warn'
281 warn = bval or lval == 'warn'
286 if bval is None and not (warn or abort or lval == 'ignore'):
282 if bval is None and not (warn or abort or lval == 'ignore'):
287 raise error.ConfigError(
283 raise error.ConfigError(
288 _("ui.portablefilenames value is invalid ('%s')") % val)
284 _("ui.portablefilenames value is invalid ('%s')") % val)
289 return abort, warn
285 return abort, warn
290
286
291 class casecollisionauditor(object):
287 class casecollisionauditor(object):
292 def __init__(self, ui, abort, dirstate):
288 def __init__(self, ui, abort, dirstate):
293 self._ui = ui
289 self._ui = ui
294 self._abort = abort
290 self._abort = abort
295 allfiles = '\0'.join(dirstate._map)
291 allfiles = '\0'.join(dirstate._map)
296 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
292 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
297 self._dirstate = dirstate
293 self._dirstate = dirstate
298 # The purpose of _newfiles is so that we don't complain about
294 # The purpose of _newfiles is so that we don't complain about
299 # case collisions if someone were to call this object with the
295 # case collisions if someone were to call this object with the
300 # same filename twice.
296 # same filename twice.
301 self._newfiles = set()
297 self._newfiles = set()
302
298
303 def __call__(self, f):
299 def __call__(self, f):
304 if f in self._newfiles:
300 if f in self._newfiles:
305 return
301 return
306 fl = encoding.lower(f)
302 fl = encoding.lower(f)
307 if fl in self._loweredfiles and f not in self._dirstate:
303 if fl in self._loweredfiles and f not in self._dirstate:
308 msg = _('possible case-folding collision for %s') % f
304 msg = _('possible case-folding collision for %s') % f
309 if self._abort:
305 if self._abort:
310 raise error.Abort(msg)
306 raise error.Abort(msg)
311 self._ui.warn(_("warning: %s\n") % msg)
307 self._ui.warn(_("warning: %s\n") % msg)
312 self._loweredfiles.add(fl)
308 self._loweredfiles.add(fl)
313 self._newfiles.add(f)
309 self._newfiles.add(f)
314
310
315 def filteredhash(repo, maxrev):
311 def filteredhash(repo, maxrev):
316 """build hash of filtered revisions in the current repoview.
312 """build hash of filtered revisions in the current repoview.
317
313
318 Multiple caches perform up-to-date validation by checking that the
314 Multiple caches perform up-to-date validation by checking that the
319 tiprev and tipnode stored in the cache file match the current repository.
315 tiprev and tipnode stored in the cache file match the current repository.
320 However, this is not sufficient for validating repoviews because the set
316 However, this is not sufficient for validating repoviews because the set
321 of revisions in the view may change without the repository tiprev and
317 of revisions in the view may change without the repository tiprev and
322 tipnode changing.
318 tipnode changing.
323
319
324 This function hashes all the revs filtered from the view and returns
320 This function hashes all the revs filtered from the view and returns
325 that SHA-1 digest.
321 that SHA-1 digest.
326 """
322 """
327 cl = repo.changelog
323 cl = repo.changelog
328 if not cl.filteredrevs:
324 if not cl.filteredrevs:
329 return None
325 return None
330 key = None
326 key = None
331 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
327 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
332 if revs:
328 if revs:
333 s = hashlib.sha1()
329 s = hashlib.sha1()
334 for rev in revs:
330 for rev in revs:
335 s.update('%s;' % rev)
331 s.update('%s;' % rev)
336 key = s.digest()
332 key = s.digest()
337 return key
333 return key
338
334
339 class abstractvfs(object):
335 # compatibility layer since all 'vfs' code moved to 'mercurial.vfs'
340 """Abstract base class; cannot be instantiated"""
336 #
341
337 # This is hard to instal deprecation warning to this since we do not have
342 def __init__(self, *args, **kwargs):
338 # access to a 'ui' object.
343 '''Prevent instantiation; don't call this from subclasses.'''
339 opener = vfs = vfsmod.vfs
344 raise NotImplementedError('attempted instantiating ' + str(type(self)))
340 filteropener = filtervfs = vfsmod.filtervfs
345
341 abstractvfs = vfsmod.abstractvfs
346 def tryread(self, path):
342 readonlyvfs = vfsmod.readonlyvfs
347 '''gracefully return an empty string for missing files'''
343 auditvfs = vfsmod.auditvfs
348 try:
344 checkambigatclosing = vfsmod.checkambigatclosing
349 return self.read(path)
350 except IOError as inst:
351 if inst.errno != errno.ENOENT:
352 raise
353 return ""
354
355 def tryreadlines(self, path, mode='rb'):
356 '''gracefully return an empty array for missing files'''
357 try:
358 return self.readlines(path, mode=mode)
359 except IOError as inst:
360 if inst.errno != errno.ENOENT:
361 raise
362 return []
363
364 @util.propertycache
365 def open(self):
366 '''Open ``path`` file, which is relative to vfs root.
367
368 Newly created directories are marked as "not to be indexed by
369 the content indexing service", if ``notindexed`` is specified
370 for "write" mode access.
371 '''
372 return self.__call__
373
374 def read(self, path):
375 with self(path, 'rb') as fp:
376 return fp.read()
377
378 def readlines(self, path, mode='rb'):
379 with self(path, mode=mode) as fp:
380 return fp.readlines()
381
382 def write(self, path, data, backgroundclose=False):
383 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
384 return fp.write(data)
385
386 def writelines(self, path, data, mode='wb', notindexed=False):
387 with self(path, mode=mode, notindexed=notindexed) as fp:
388 return fp.writelines(data)
389
390 def append(self, path, data):
391 with self(path, 'ab') as fp:
392 return fp.write(data)
393
394 def basename(self, path):
395 """return base element of a path (as os.path.basename would do)
396
397 This exists to allow handling of strange encoding if needed."""
398 return os.path.basename(path)
399
400 def chmod(self, path, mode):
401 return os.chmod(self.join(path), mode)
402
403 def dirname(self, path):
404 """return dirname element of a path (as os.path.dirname would do)
405
406 This exists to allow handling of strange encoding if needed."""
407 return os.path.dirname(path)
408
409 def exists(self, path=None):
410 return os.path.exists(self.join(path))
411
412 def fstat(self, fp):
413 return util.fstat(fp)
414
415 def isdir(self, path=None):
416 return os.path.isdir(self.join(path))
417
418 def isfile(self, path=None):
419 return os.path.isfile(self.join(path))
420
421 def islink(self, path=None):
422 return os.path.islink(self.join(path))
423
424 def isfileorlink(self, path=None):
425 '''return whether path is a regular file or a symlink
426
427 Unlike isfile, this doesn't follow symlinks.'''
428 try:
429 st = self.lstat(path)
430 except OSError:
431 return False
432 mode = st.st_mode
433 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
434
435 def reljoin(self, *paths):
436 """join various elements of a path together (as os.path.join would do)
437
438 The vfs base is not injected so that path stay relative. This exists
439 to allow handling of strange encoding if needed."""
440 return os.path.join(*paths)
441
442 def split(self, path):
443 """split top-most element of a path (as os.path.split would do)
444
445 This exists to allow handling of strange encoding if needed."""
446 return os.path.split(path)
447
448 def lexists(self, path=None):
449 return os.path.lexists(self.join(path))
450
451 def lstat(self, path=None):
452 return os.lstat(self.join(path))
453
454 def listdir(self, path=None):
455 return os.listdir(self.join(path))
456
457 def makedir(self, path=None, notindexed=True):
458 return util.makedir(self.join(path), notindexed)
459
460 def makedirs(self, path=None, mode=None):
461 return util.makedirs(self.join(path), mode)
462
463 def makelock(self, info, path):
464 return util.makelock(info, self.join(path))
465
466 def mkdir(self, path=None):
467 return os.mkdir(self.join(path))
468
469 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
470 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
471 dir=self.join(dir), text=text)
472 dname, fname = util.split(name)
473 if dir:
474 return fd, os.path.join(dir, fname)
475 else:
476 return fd, fname
477
478 def readdir(self, path=None, stat=None, skip=None):
479 return osutil.listdir(self.join(path), stat, skip)
480
481 def readlock(self, path):
482 return util.readlock(self.join(path))
483
484 def rename(self, src, dst, checkambig=False):
485 """Rename from src to dst
486
487 checkambig argument is used with util.filestat, and is useful
488 only if destination file is guarded by any lock
489 (e.g. repo.lock or repo.wlock).
490 """
491 dstpath = self.join(dst)
492 oldstat = checkambig and util.filestat(dstpath)
493 if oldstat and oldstat.stat:
494 ret = util.rename(self.join(src), dstpath)
495 newstat = util.filestat(dstpath)
496 if newstat.isambig(oldstat):
497 # stat of renamed file is ambiguous to original one
498 newstat.avoidambig(dstpath, oldstat)
499 return ret
500 return util.rename(self.join(src), dstpath)
501
502 def readlink(self, path):
503 return os.readlink(self.join(path))
504
505 def removedirs(self, path=None):
506 """Remove a leaf directory and all empty intermediate ones
507 """
508 return util.removedirs(self.join(path))
509
510 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
511 """Remove a directory tree recursively
512
513 If ``forcibly``, this tries to remove READ-ONLY files, too.
514 """
515 if forcibly:
516 def onerror(function, path, excinfo):
517 if function is not os.remove:
518 raise
519 # read-only files cannot be unlinked under Windows
520 s = os.stat(path)
521 if (s.st_mode & stat.S_IWRITE) != 0:
522 raise
523 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
524 os.remove(path)
525 else:
526 onerror = None
527 return shutil.rmtree(self.join(path),
528 ignore_errors=ignore_errors, onerror=onerror)
529
530 def setflags(self, path, l, x):
531 return util.setflags(self.join(path), l, x)
532
533 def stat(self, path=None):
534 return os.stat(self.join(path))
535
536 def unlink(self, path=None):
537 return util.unlink(self.join(path))
538
539 def unlinkpath(self, path=None, ignoremissing=False):
540 return util.unlinkpath(self.join(path), ignoremissing)
541
542 def utime(self, path=None, t=None):
543 return os.utime(self.join(path), t)
544
545 def walk(self, path=None, onerror=None):
546 """Yield (dirpath, dirs, files) tuple for each directories under path
547
548 ``dirpath`` is relative one from the root of this vfs. This
549 uses ``os.sep`` as path separator, even you specify POSIX
550 style ``path``.
551
552 "The root of this vfs" is represented as empty ``dirpath``.
553 """
554 root = os.path.normpath(self.join(None))
555 # when dirpath == root, dirpath[prefixlen:] becomes empty
556 # because len(dirpath) < prefixlen.
557 prefixlen = len(pathutil.normasprefix(root))
558 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
559 yield (dirpath[prefixlen:], dirs, files)
560
561 @contextlib.contextmanager
562 def backgroundclosing(self, ui, expectedcount=-1):
563 """Allow files to be closed asynchronously.
564
565 When this context manager is active, ``backgroundclose`` can be passed
566 to ``__call__``/``open`` to result in the file possibly being closed
567 asynchronously, on a background thread.
568 """
569 # This is an arbitrary restriction and could be changed if we ever
570 # have a use case.
571 vfs = getattr(self, 'vfs', self)
572 if getattr(vfs, '_backgroundfilecloser', None):
573 raise error.Abort(
574 _('can only have 1 active background file closer'))
575
576 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
577 try:
578 vfs._backgroundfilecloser = bfc
579 yield bfc
580 finally:
581 vfs._backgroundfilecloser = None
582
583 class vfs(abstractvfs):
584 '''Operate files relative to a base directory
585
586 This class is used to hide the details of COW semantics and
587 remote file access from higher level code.
588 '''
589 def __init__(self, base, audit=True, expandpath=False, realpath=False):
590 if expandpath:
591 base = util.expandpath(base)
592 if realpath:
593 base = os.path.realpath(base)
594 self.base = base
595 self.mustaudit = audit
596 self.createmode = None
597 self._trustnlink = None
598
599 @property
600 def mustaudit(self):
601 return self._audit
602
603 @mustaudit.setter
604 def mustaudit(self, onoff):
605 self._audit = onoff
606 if onoff:
607 self.audit = pathutil.pathauditor(self.base)
608 else:
609 self.audit = util.always
610
611 @util.propertycache
612 def _cansymlink(self):
613 return util.checklink(self.base)
614
615 @util.propertycache
616 def _chmod(self):
617 return util.checkexec(self.base)
618
619 def _fixfilemode(self, name):
620 if self.createmode is None or not self._chmod:
621 return
622 os.chmod(name, self.createmode & 0o666)
623
    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            # Reject OS-reserved filenames before touching the filesystem.
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the hardlink count of the target; -1 means "unknown",
        # 0 means "freshly created" (triggers _fixfilemode below).
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
        if nlink > 0:
            if self._trustnlink is None:
                self._trustnlink = nlink > 1 or util.checknlink(f)
            if nlink > 1 or not self._trustnlink:
                # Break the hardlink (copy-then-rename) before writing so we
                # do not modify the other link's content.
                util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp
709
710 def symlink(self, src, dst):
711 self.audit(dst)
712 linkname = self.join(dst)
713 try:
714 os.unlink(linkname)
715 except OSError:
716 pass
717
718 util.makedirs(os.path.dirname(linkname), self.createmode)
719
720 if self._cansymlink:
721 try:
722 os.symlink(src, linkname)
723 except OSError as err:
724 raise OSError(err.errno, _('could not symlink to %r: %s') %
725 (src, err.strerror), linkname)
726 else:
727 self.write(dst, src)
728
729 def join(self, path, *insidef):
730 if path:
731 return os.path.join(self.base, path, *insidef)
732 else:
733 return self.base
734
# Historical alias kept for backward compatibility with older callers.
opener = vfs
736
class auditvfs(object):
    '''Base wrapper forwarding auditing state and options to an inner vfs.'''
    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        # Delegate the audit flag to the wrapped vfs.
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        # Delegate options to the wrapped vfs.
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
756
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that rewrites every filename through a filter function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # Open via the inner vfs after mapping the name.
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
772
# Historical alias kept for backward compatibility with older callers.
filteropener = filtervfs
774
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs refusing every write access.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # Only plain read modes are let through.
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort(_('this vfs is read only'))

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
788
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only errors on the starting directory itself are fatal.
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat; return True only if it was not seen
            # before (guards against symlink cycles).
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # Without samestat we cannot detect cycles, so do not follow links.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # Bundled defaults ship as *.rc files under <datapath>/default.d.
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path
# Module-level cache for rcpath(); populated lazily on first call.
_rcpath = None
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in encoding.environ:
            _rcpath = []
            for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # A directory entry contributes all its *.rc files.
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation

    ``None`` (the working directory) maps to the ``wdirrev`` sentinel.
    """
    if rev is None:
        return wdirrev
    return rev
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx, or ``repo[default]`` when
    the spec is empty.  Raises ``error.Abort`` on an empty revision set."""
    # 0 is a valid revision number, so test emptiness explicitly.
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]
def _pairspec(revspec):
    """Return True if ``revspec`` parses as a top-level range expression
    (which must always resolve to a revision pair)."""
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
def revpair(repo, revs):
    """Resolve ``revs`` into a ``(node, node-or-None)`` pair.

    With no specs, returns the first working-directory parent and ``None``.
    Raises ``error.Abort`` on an empty range or an empty side of a range.
    """
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # Pick the endpoints without materializing the whole set when the
    # smartset knows its ordering.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            # Bare revision numbers become explicit rev() revsets.
            spec = revsetlang.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True)
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # In debug mode always show the (possibly null) second parent too.
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        return []
    return parents
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # Only kind-less patterns are treated as bare globs.
            try:
                globbed = glob.glob(pat)
            except re.error:
                # An unparsable pattern is kept verbatim.
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # Default badfn: warn through the repo ui.  ``m`` is bound below,
        # before the matcher can invoke this callback.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # Thin wrapper: drop the pats half of matchandpats' result.
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    filepathfromroot = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)

    # Make sure the configured backup directory exists before returning.
    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and forget missing ones, recording renames by
    similarity.  Returns 1 when any explicitly-requested file was rejected
    (or a subrepo reported failure), else 0."""
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # Recurse into matching subrepositories first.
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # Any rejection of an explicitly named file is an overall failure.
    for f in rejected:
        if f in m.files():
            return 1
    return ret
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # ``rejected`` is bound after the lambda but before the matcher can
    # call the badfn.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns the tuple ``(added, unknown, deleted, removed, forgotten)``.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # ``st`` is the stat result (falsy when the file is gone on disk).
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # All dirstate mutations happen under the working-copy lock.
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # The source was only just added; no copy data can be recorded.
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirements read; raises ``error.RequirementError``
    when the file is corrupt or lists an unsupported feature.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            # Entries must start with an alphanumeric; anything else means
            # the file itself is damaged rather than merely unsupported.
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
def writerequires(opener, requirements):
    """Write ``requirements`` (one per line, sorted) to the 'requires' file
    via ``opener``."""
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)
1229 class filecachesubentry(object):
786 class filecachesubentry(object):
1230 def __init__(self, path, stat):
787 def __init__(self, path, stat):
1231 self.path = path
788 self.path = path
1232 self.cachestat = None
789 self.cachestat = None
1233 self._cacheable = None
790 self._cacheable = None
1234
791
1235 if stat:
792 if stat:
1236 self.cachestat = filecachesubentry.stat(self.path)
793 self.cachestat = filecachesubentry.stat(self.path)
1237
794
1238 if self.cachestat:
795 if self.cachestat:
1239 self._cacheable = self.cachestat.cacheable()
796 self._cacheable = self.cachestat.cacheable()
1240 else:
797 else:
1241 # None means we don't know yet
798 # None means we don't know yet
1242 self._cacheable = None
799 self._cacheable = None
1243
800
1244 def refresh(self):
801 def refresh(self):
1245 if self.cacheable():
802 if self.cacheable():
1246 self.cachestat = filecachesubentry.stat(self.path)
803 self.cachestat = filecachesubentry.stat(self.path)
1247
804
1248 def cacheable(self):
805 def cacheable(self):
1249 if self._cacheable is not None:
806 if self._cacheable is not None:
1250 return self._cacheable
807 return self._cacheable
1251
808
1252 # we don't know yet, assume it is for now
809 # we don't know yet, assume it is for now
1253 return True
810 return True
1254
811
1255 def changed(self):
812 def changed(self):
1256 # no point in going further if we can't cache it
813 # no point in going further if we can't cache it
1257 if not self.cacheable():
814 if not self.cacheable():
1258 return True
815 return True
1259
816
1260 newstat = filecachesubentry.stat(self.path)
817 newstat = filecachesubentry.stat(self.path)
1261
818
1262 # we may not know if it's cacheable yet, check again now
819 # we may not know if it's cacheable yet, check again now
1263 if newstat and self._cacheable is None:
820 if newstat and self._cacheable is None:
1264 self._cacheable = newstat.cacheable()
821 self._cacheable = newstat.cacheable()
1265
822
1266 # check again
823 # check again
1267 if not self._cacheable:
824 if not self._cacheable:
1268 return True
825 return True
1269
826
1270 if self.cachestat != newstat:
827 if self.cachestat != newstat:
1271 self.cachestat = newstat
828 self.cachestat = newstat
1272 return True
829 return True
1273 else:
830 else:
1274 return False
831 return False
1275
832
1276 @staticmethod
833 @staticmethod
1277 def stat(path):
834 def stat(path):
1278 try:
835 try:
1279 return util.cachestat(path)
836 return util.cachestat(path)
1280 except OSError as e:
837 except OSError as e:
1281 if e.errno != errno.ENOENT:
838 if e.errno != errno.ENOENT:
1282 raise
839 raise
1283
840
1284 class filecacheentry(object):
841 class filecacheentry(object):
1285 def __init__(self, paths, stat=True):
842 def __init__(self, paths, stat=True):
1286 self._entries = []
843 self._entries = []
1287 for path in paths:
844 for path in paths:
1288 self._entries.append(filecachesubentry(path, stat))
845 self._entries.append(filecachesubentry(path, stat))
1289
846
1290 def changed(self):
847 def changed(self):
1291 '''true if any entry has changed'''
848 '''true if any entry has changed'''
1292 for entry in self._entries:
849 for entry in self._entries:
1293 if entry.changed():
850 if entry.changed():
1294 return True
851 return True
1295 return False
852 return False
1296
853
1297 def refresh(self):
854 def refresh(self):
1298 for entry in self._entries:
855 for entry in self._entries:
1299 entry.refresh()
856 entry.refresh()
1300
857
1301 class filecache(object):
858 class filecache(object):
1302 '''A property like decorator that tracks files under .hg/ for updates.
859 '''A property like decorator that tracks files under .hg/ for updates.
1303
860
1304 Records stat info when called in _filecache.
861 Records stat info when called in _filecache.
1305
862
1306 On subsequent calls, compares old stat info with new info, and recreates the
863 On subsequent calls, compares old stat info with new info, and recreates the
1307 object when any of the files changes, updating the new stat info in
864 object when any of the files changes, updating the new stat info in
1308 _filecache.
865 _filecache.
1309
866
1310 Mercurial either atomic renames or appends for files under .hg,
867 Mercurial either atomic renames or appends for files under .hg,
1311 so to ensure the cache is reliable we need the filesystem to be able
868 so to ensure the cache is reliable we need the filesystem to be able
1312 to tell us if a file has been replaced. If it can't, we fallback to
869 to tell us if a file has been replaced. If it can't, we fallback to
1313 recreating the object on every call (essentially the same behavior as
870 recreating the object on every call (essentially the same behavior as
1314 propertycache).
871 propertycache).
1315
872
1316 '''
873 '''
1317 def __init__(self, *paths):
874 def __init__(self, *paths):
1318 self.paths = paths
875 self.paths = paths
1319
876
1320 def join(self, obj, fname):
877 def join(self, obj, fname):
1321 """Used to compute the runtime path of a cached file.
878 """Used to compute the runtime path of a cached file.
1322
879
1323 Users should subclass filecache and provide their own version of this
880 Users should subclass filecache and provide their own version of this
1324 function to call the appropriate join function on 'obj' (an instance
881 function to call the appropriate join function on 'obj' (an instance
1325 of the class that its member function was decorated).
882 of the class that its member function was decorated).
1326 """
883 """
1327 return obj.join(fname)
884 return obj.join(fname)
1328
885
1329 def __call__(self, func):
886 def __call__(self, func):
1330 self.func = func
887 self.func = func
1331 self.name = func.__name__
888 self.name = func.__name__
1332 return self
889 return self
1333
890
1334 def __get__(self, obj, type=None):
891 def __get__(self, obj, type=None):
1335 # if accessed on the class, return the descriptor itself.
892 # if accessed on the class, return the descriptor itself.
1336 if obj is None:
893 if obj is None:
1337 return self
894 return self
1338 # do we need to check if the file changed?
895 # do we need to check if the file changed?
1339 if self.name in obj.__dict__:
896 if self.name in obj.__dict__:
1340 assert self.name in obj._filecache, self.name
897 assert self.name in obj._filecache, self.name
1341 return obj.__dict__[self.name]
898 return obj.__dict__[self.name]
1342
899
1343 entry = obj._filecache.get(self.name)
900 entry = obj._filecache.get(self.name)
1344
901
1345 if entry:
902 if entry:
1346 if entry.changed():
903 if entry.changed():
1347 entry.obj = self.func(obj)
904 entry.obj = self.func(obj)
1348 else:
905 else:
1349 paths = [self.join(obj, path) for path in self.paths]
906 paths = [self.join(obj, path) for path in self.paths]
1350
907
1351 # We stat -before- creating the object so our cache doesn't lie if
908 # We stat -before- creating the object so our cache doesn't lie if
1352 # a writer modified between the time we read and stat
909 # a writer modified between the time we read and stat
1353 entry = filecacheentry(paths, True)
910 entry = filecacheentry(paths, True)
1354 entry.obj = self.func(obj)
911 entry.obj = self.func(obj)
1355
912
1356 obj._filecache[self.name] = entry
913 obj._filecache[self.name] = entry
1357
914
1358 obj.__dict__[self.name] = entry.obj
915 obj.__dict__[self.name] = entry.obj
1359 return entry.obj
916 return entry.obj
1360
917
1361 def __set__(self, obj, value):
918 def __set__(self, obj, value):
1362 if self.name not in obj._filecache:
919 if self.name not in obj._filecache:
1363 # we add an entry for the missing value because X in __dict__
920 # we add an entry for the missing value because X in __dict__
1364 # implies X in _filecache
921 # implies X in _filecache
1365 paths = [self.join(obj, path) for path in self.paths]
922 paths = [self.join(obj, path) for path in self.paths]
1366 ce = filecacheentry(paths, False)
923 ce = filecacheentry(paths, False)
1367 obj._filecache[self.name] = ce
924 obj._filecache[self.name] = ce
1368 else:
925 else:
1369 ce = obj._filecache[self.name]
926 ce = obj._filecache[self.name]
1370
927
1371 ce.obj = value # update cached copy
928 ce.obj = value # update cached copy
1372 obj.__dict__[self.name] = value # update copy returned by obj.x
929 obj.__dict__[self.name] = value # update copy returned by obj.x
1373
930
1374 def __delete__(self, obj):
931 def __delete__(self, obj):
1375 try:
932 try:
1376 del obj.__dict__[self.name]
933 del obj.__dict__[self.name]
1377 except KeyError:
934 except KeyError:
1378 raise AttributeError(self.name)
935 raise AttributeError(self.name)
1379
936
1380 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
937 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1381 if lock is None:
938 if lock is None:
1382 raise error.LockInheritanceContractViolation(
939 raise error.LockInheritanceContractViolation(
1383 'lock can only be inherited while held')
940 'lock can only be inherited while held')
1384 if environ is None:
941 if environ is None:
1385 environ = {}
942 environ = {}
1386 with lock.inherit() as locker:
943 with lock.inherit() as locker:
1387 environ[envvar] = locker
944 environ[envvar] = locker
1388 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
945 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1389
946
1390 def wlocksub(repo, cmd, *args, **kwargs):
947 def wlocksub(repo, cmd, *args, **kwargs):
1391 """run cmd as a subprocess that allows inheriting repo's wlock
948 """run cmd as a subprocess that allows inheriting repo's wlock
1392
949
1393 This can only be called while the wlock is held. This takes all the
950 This can only be called while the wlock is held. This takes all the
1394 arguments that ui.system does, and returns the exit code of the
951 arguments that ui.system does, and returns the exit code of the
1395 subprocess."""
952 subprocess."""
1396 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
953 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1397 **kwargs)
954 **kwargs)
1398
955
1399 def gdinitconfig(ui):
956 def gdinitconfig(ui):
1400 """helper function to know if a repo should be created as general delta
957 """helper function to know if a repo should be created as general delta
1401 """
958 """
1402 # experimental config: format.generaldelta
959 # experimental config: format.generaldelta
1403 return (ui.configbool('format', 'generaldelta', False)
960 return (ui.configbool('format', 'generaldelta', False)
1404 or ui.configbool('format', 'usegeneraldelta', True))
961 or ui.configbool('format', 'usegeneraldelta', True))
1405
962
1406 def gddeltaconfig(ui):
963 def gddeltaconfig(ui):
1407 """helper function to know if incoming delta should be optimised
964 """helper function to know if incoming delta should be optimised
1408 """
965 """
1409 # experimental config: format.generaldelta
966 # experimental config: format.generaldelta
1410 return ui.configbool('format', 'generaldelta', False)
967 return ui.configbool('format', 'generaldelta', False)
1411
1412 class closewrapbase(object):
1413 """Base class of wrapper, which hooks closing
1414
1415 Do not instantiate outside of the vfs layer.
1416 """
1417 def __init__(self, fh):
1418 object.__setattr__(self, '_origfh', fh)
1419
1420 def __getattr__(self, attr):
1421 return getattr(self._origfh, attr)
1422
1423 def __setattr__(self, attr, value):
1424 return setattr(self._origfh, attr, value)
1425
1426 def __delattr__(self, attr):
1427 return delattr(self._origfh, attr)
1428
1429 def __enter__(self):
1430 return self._origfh.__enter__()
1431
1432 def __exit__(self, exc_type, exc_value, exc_tb):
1433 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1434
1435 def close(self):
1436 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1437
1438 class delayclosedfile(closewrapbase):
1439 """Proxy for a file object whose close is delayed.
1440
1441 Do not instantiate outside of the vfs layer.
1442 """
1443 def __init__(self, fh, closer):
1444 super(delayclosedfile, self).__init__(fh)
1445 object.__setattr__(self, '_closer', closer)
1446
1447 def __exit__(self, exc_type, exc_value, exc_tb):
1448 self._closer.close(self._origfh)
1449
1450 def close(self):
1451 self._closer.close(self._origfh)
1452
1453 class backgroundfilecloser(object):
1454 """Coordinates background closing of file handles on multiple threads."""
1455 def __init__(self, ui, expectedcount=-1):
1456 self._running = False
1457 self._entered = False
1458 self._threads = []
1459 self._threadexception = None
1460
1461 # Only Windows/NTFS has slow file closing. So only enable by default
1462 # on that platform. But allow to be enabled elsewhere for testing.
1463 defaultenabled = pycompat.osname == 'nt'
1464 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1465
1466 if not enabled:
1467 return
1468
1469 # There is overhead to starting and stopping the background threads.
1470 # Don't do background processing unless the file count is large enough
1471 # to justify it.
1472 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1473 2048)
1474 # FUTURE dynamically start background threads after minfilecount closes.
1475 # (We don't currently have any callers that don't know their file count)
1476 if expectedcount > 0 and expectedcount < minfilecount:
1477 return
1478
1479 # Windows defaults to a limit of 512 open files. A buffer of 128
1480 # should give us enough headway.
1481 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1482 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1483
1484 ui.debug('starting %d threads for background file closing\n' %
1485 threadcount)
1486
1487 self._queue = util.queue(maxsize=maxqueue)
1488 self._running = True
1489
1490 for i in range(threadcount):
1491 t = threading.Thread(target=self._worker, name='backgroundcloser')
1492 self._threads.append(t)
1493 t.start()
1494
1495 def __enter__(self):
1496 self._entered = True
1497 return self
1498
1499 def __exit__(self, exc_type, exc_value, exc_tb):
1500 self._running = False
1501
1502 # Wait for threads to finish closing so open files don't linger for
1503 # longer than lifetime of context manager.
1504 for t in self._threads:
1505 t.join()
1506
1507 def _worker(self):
1508 """Main routine for worker thread."""
1509 while True:
1510 try:
1511 fh = self._queue.get(block=True, timeout=0.100)
1512 # Need to catch or the thread will terminate and
1513 # we could orphan file descriptors.
1514 try:
1515 fh.close()
1516 except Exception as e:
1517 # Stash so can re-raise from main thread later.
1518 self._threadexception = e
1519 except util.empty:
1520 if not self._running:
1521 break
1522
1523 def close(self, fh):
1524 """Schedule a file for closing."""
1525 if not self._entered:
1526 raise error.Abort(_('can only call close() when context manager '
1527 'active'))
1528
1529 # If a background thread encountered an exception, raise now so we fail
1530 # fast. Otherwise we may potentially go on for minutes until the error
1531 # is acted on.
1532 if self._threadexception:
1533 e = self._threadexception
1534 self._threadexception = None
1535 raise e
1536
1537 # If we're not actively running, close synchronously.
1538 if not self._running:
1539 fh.close()
1540 return
1541
1542 self._queue.put(fh, block=True, timeout=None)
1543
1544 class checkambigatclosing(closewrapbase):
1545 """Proxy for a file object, to avoid ambiguity of file stat
1546
1547 See also util.filestat for detail about "ambiguity of file stat".
1548
1549 This proxy is useful only if the target file is guarded by any
1550 lock (e.g. repo.lock or repo.wlock)
1551
1552 Do not instantiate outside of the vfs layer.
1553 """
1554 def __init__(self, fh):
1555 super(checkambigatclosing, self).__init__(fh)
1556 object.__setattr__(self, '_oldstat', util.filestat(fh.name))
1557
1558 def _checkambig(self):
1559 oldstat = self._oldstat
1560 if oldstat.stat:
1561 newstat = util.filestat(self._origfh.name)
1562 if newstat.isambig(oldstat):
1563 # stat of changed file is ambiguous to original one
1564 newstat.avoidambig(self._origfh.name, oldstat)
1565
1566 def __exit__(self, exc_type, exc_value, exc_tb):
1567 self._origfh.__exit__(exc_type, exc_value, exc_tb)
1568 self._checkambig()
1569
1570 def close(self):
1571 self._origfh.close()
1572 self._checkambig()
This diff has been collapsed as it changes many lines, (938 lines changed) Show them Hide them
@@ -1,1572 +1,636 b''
1 # scmutil.py - Mercurial core utility functions
1 # vfs.py - Mercurial 'vfs' classes
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
7 from __future__ import absolute_import
9
8
10 import contextlib
9 import contextlib
11 import errno
10 import errno
12 import glob
13 import hashlib
14 import os
11 import os
15 import re
16 import shutil
12 import shutil
17 import socket
18 import stat
13 import stat
19 import tempfile
14 import tempfile
20 import threading
15 import threading
21
16
22 from .i18n import _
17 from .i18n import _
23 from .node import wdirrev
24 from . import (
18 from . import (
25 encoding,
26 error,
19 error,
27 match as matchmod,
28 osutil,
20 osutil,
29 pathutil,
21 pathutil,
30 phases,
31 pycompat,
22 pycompat,
32 revsetlang,
33 similar,
34 util,
23 util,
35 )
24 )
36
25
37 if pycompat.osname == 'nt':
38 from . import scmwindows as scmplatform
39 else:
40 from . import scmposix as scmplatform
41
42 systemrcpath = scmplatform.systemrcpath
43 userrcpath = scmplatform.userrcpath
44 termsize = scmplatform.termsize
45
46 class status(tuple):
47 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
48 and 'ignored' properties are only relevant to the working copy.
49 '''
50
51 __slots__ = ()
52
53 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
54 clean):
55 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
56 ignored, clean))
57
58 @property
59 def modified(self):
60 '''files that have been modified'''
61 return self[0]
62
63 @property
64 def added(self):
65 '''files that have been added'''
66 return self[1]
67
68 @property
69 def removed(self):
70 '''files that have been removed'''
71 return self[2]
72
73 @property
74 def deleted(self):
75 '''files that are in the dirstate, but have been deleted from the
76 working copy (aka "missing")
77 '''
78 return self[3]
79
80 @property
81 def unknown(self):
82 '''files not in the dirstate that are not ignored'''
83 return self[4]
84
85 @property
86 def ignored(self):
87 '''files not in the dirstate that are ignored (by _dirignore())'''
88 return self[5]
89
90 @property
91 def clean(self):
92 '''files that have not been modified'''
93 return self[6]
94
95 def __repr__(self, *args, **kwargs):
96 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
97 'unknown=%r, ignored=%r, clean=%r>') % self)
98
99 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
107 missing = set()
108
109 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
111 del subpaths[subpath]
112 missing.add(subpath)
113
114 for subpath, ctx in sorted(subpaths.iteritems()):
115 yield subpath, ctx.sub(subpath)
116
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
121 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
123
124 def nochangesfound(ui, repo, excluded=None):
125 '''Report no changes for push/pull, excluded is None or a list of
126 nodes excluded from the push/pull.
127 '''
128 secretlist = []
129 if excluded:
130 for n in excluded:
131 if n not in repo:
132 # discovery should not have included the filtered revision,
133 # we have to explicitly exclude it until discovery is cleanup.
134 continue
135 ctx = repo[n]
136 if ctx.phase() >= phases.secret and not ctx.extinct():
137 secretlist.append(n)
138
139 if secretlist:
140 ui.status(_("no changes found (ignored %d secret changesets)\n")
141 % len(secretlist))
142 else:
143 ui.status(_("no changes found\n"))
144
145 def callcatch(ui, func):
146 """call func() with global exception handling
147
148 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
150 """
151 try:
152 return func()
153 # Global exception handling, alphabetically
154 # Mercurial-specific first, followed by built-in and library exceptions
155 except error.LockHeld as inst:
156 if inst.errno == errno.ETIMEDOUT:
157 reason = _('timed out waiting for lock held by %s') % inst.locker
158 else:
159 reason = _('lock held by %s') % inst.locker
160 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
161 except error.LockUnavailable as inst:
162 ui.warn(_("abort: could not lock %s: %s\n") %
163 (inst.desc or inst.filename, inst.strerror))
164 except error.OutOfBandError as inst:
165 if inst.args:
166 msg = _("abort: remote error:\n")
167 else:
168 msg = _("abort: remote error\n")
169 ui.warn(msg)
170 if inst.args:
171 ui.warn(''.join(inst.args))
172 if inst.hint:
173 ui.warn('(%s)\n' % inst.hint)
174 except error.RepoError as inst:
175 ui.warn(_("abort: %s!\n") % inst)
176 if inst.hint:
177 ui.warn(_("(%s)\n") % inst.hint)
178 except error.ResponseError as inst:
179 ui.warn(_("abort: %s") % inst.args[0])
180 if not isinstance(inst.args[1], basestring):
181 ui.warn(" %r\n" % (inst.args[1],))
182 elif not inst.args[1]:
183 ui.warn(_(" empty string\n"))
184 else:
185 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
186 except error.CensoredNodeError as inst:
187 ui.warn(_("abort: file censored %s!\n") % inst)
188 except error.RevlogError as inst:
189 ui.warn(_("abort: %s!\n") % inst)
190 except error.SignalInterrupt:
191 ui.warn(_("killed!\n"))
192 except error.InterventionRequired as inst:
193 ui.warn("%s\n" % inst)
194 if inst.hint:
195 ui.warn(_("(%s)\n") % inst.hint)
196 return 1
197 except error.Abort as inst:
198 ui.warn(_("abort: %s\n") % inst)
199 if inst.hint:
200 ui.warn(_("(%s)\n") % inst.hint)
201 except ImportError as inst:
202 ui.warn(_("abort: %s!\n") % inst)
203 m = str(inst).split()[-1]
204 if m in "mpatch bdiff".split():
205 ui.warn(_("(did you forget to compile extensions?)\n"))
206 elif m in "zlib".split():
207 ui.warn(_("(is your Python install correct?)\n"))
208 except IOError as inst:
209 if util.safehasattr(inst, "code"):
210 ui.warn(_("abort: %s\n") % inst)
211 elif util.safehasattr(inst, "reason"):
212 try: # usually it is in the form (errno, strerror)
213 reason = inst.reason.args[1]
214 except (AttributeError, IndexError):
215 # it might be anything, for example a string
216 reason = inst.reason
217 if isinstance(reason, unicode):
218 # SSLError of Python 2.7.9 contains a unicode
219 reason = reason.encode(encoding.encoding, 'replace')
220 ui.warn(_("abort: error: %s\n") % reason)
221 elif (util.safehasattr(inst, "args")
222 and inst.args and inst.args[0] == errno.EPIPE):
223 pass
224 elif getattr(inst, "strerror", None):
225 if getattr(inst, "filename", None):
226 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
227 else:
228 ui.warn(_("abort: %s\n") % inst.strerror)
229 else:
230 raise
231 except OSError as inst:
232 if getattr(inst, "filename", None) is not None:
233 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
234 else:
235 ui.warn(_("abort: %s\n") % inst.strerror)
236 except MemoryError:
237 ui.warn(_("abort: out of memory\n"))
238 except SystemExit as inst:
239 # Commands shouldn't sys.exit directly, but give a return code.
240 # Just in case catch this and and pass exit code to caller.
241 return inst.code
242 except socket.error as inst:
243 ui.warn(_("abort: %s\n") % inst.args[-1])
244
245 return -1
246
247 def checknewlabel(repo, lbl, kind):
248 # Do not use the "kind" parameter in ui output.
249 # It makes strings difficult to translate.
250 if lbl in ['tip', '.', 'null']:
251 raise error.Abort(_("the name '%s' is reserved") % lbl)
252 for c in (':', '\0', '\n', '\r'):
253 if c in lbl:
254 raise error.Abort(_("%r cannot be used in a name") % c)
255 try:
256 int(lbl)
257 raise error.Abort(_("cannot use an integer as a name"))
258 except ValueError:
259 pass
260
261 def checkfilename(f):
262 '''Check that the filename f is an acceptable filename for a tracked file'''
263 if '\r' in f or '\n' in f:
264 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
265
266 def checkportable(ui, f):
267 '''Check if filename f is portable and warn or abort depending on config'''
268 checkfilename(f)
269 abort, warn = checkportabilityalert(ui)
270 if abort or warn:
271 msg = util.checkwinfilename(f)
272 if msg:
273 msg = "%s: %r" % (msg, f)
274 if abort:
275 raise error.Abort(msg)
276 ui.warn(_("warning: %s\n") % msg)
277
278 def checkportabilityalert(ui):
279 '''check if the user's config requests nothing, a warning, or abort for
280 non-portable filenames'''
281 val = ui.config('ui', 'portablefilenames', 'warn')
282 lval = val.lower()
283 bval = util.parsebool(val)
284 abort = pycompat.osname == 'nt' or lval == 'abort'
285 warn = bval or lval == 'warn'
286 if bval is None and not (warn or abort or lval == 'ignore'):
287 raise error.ConfigError(
288 _("ui.portablefilenames value is invalid ('%s')") % val)
289 return abort, warn
290
291 class casecollisionauditor(object):
292 def __init__(self, ui, abort, dirstate):
293 self._ui = ui
294 self._abort = abort
295 allfiles = '\0'.join(dirstate._map)
296 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
297 self._dirstate = dirstate
298 # The purpose of _newfiles is so that we don't complain about
299 # case collisions if someone were to call this object with the
300 # same filename twice.
301 self._newfiles = set()
302
303 def __call__(self, f):
304 if f in self._newfiles:
305 return
306 fl = encoding.lower(f)
307 if fl in self._loweredfiles and f not in self._dirstate:
308 msg = _('possible case-folding collision for %s') % f
309 if self._abort:
310 raise error.Abort(msg)
311 self._ui.warn(_("warning: %s\n") % msg)
312 self._loweredfiles.add(fl)
313 self._newfiles.add(f)
314
315 def filteredhash(repo, maxrev):
316 """build hash of filtered revisions in the current repoview.
317
318 Multiple caches perform up-to-date validation by checking that the
319 tiprev and tipnode stored in the cache file match the current repository.
320 However, this is not sufficient for validating repoviews because the set
321 of revisions in the view may change without the repository tiprev and
322 tipnode changing.
323
324 This function hashes all the revs filtered from the view and returns
325 that SHA-1 digest.
326 """
327 cl = repo.changelog
328 if not cl.filteredrevs:
329 return None
330 key = None
331 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
332 if revs:
333 s = hashlib.sha1()
334 for rev in revs:
335 s.update('%s;' % rev)
336 key = s.digest()
337 return key
338
339 class abstractvfs(object):
26 class abstractvfs(object):
340 """Abstract base class; cannot be instantiated"""
27 """Abstract base class; cannot be instantiated"""
341
28
342 def __init__(self, *args, **kwargs):
29 def __init__(self, *args, **kwargs):
343 '''Prevent instantiation; don't call this from subclasses.'''
30 '''Prevent instantiation; don't call this from subclasses.'''
344 raise NotImplementedError('attempted instantiating ' + str(type(self)))
31 raise NotImplementedError('attempted instantiating ' + str(type(self)))
345
32
346 def tryread(self, path):
33 def tryread(self, path):
347 '''gracefully return an empty string for missing files'''
34 '''gracefully return an empty string for missing files'''
348 try:
35 try:
349 return self.read(path)
36 return self.read(path)
350 except IOError as inst:
37 except IOError as inst:
351 if inst.errno != errno.ENOENT:
38 if inst.errno != errno.ENOENT:
352 raise
39 raise
353 return ""
40 return ""
354
41
355 def tryreadlines(self, path, mode='rb'):
42 def tryreadlines(self, path, mode='rb'):
356 '''gracefully return an empty array for missing files'''
43 '''gracefully return an empty array for missing files'''
357 try:
44 try:
358 return self.readlines(path, mode=mode)
45 return self.readlines(path, mode=mode)
359 except IOError as inst:
46 except IOError as inst:
360 if inst.errno != errno.ENOENT:
47 if inst.errno != errno.ENOENT:
361 raise
48 raise
362 return []
49 return []
363
50
364 @util.propertycache
51 @util.propertycache
365 def open(self):
52 def open(self):
366 '''Open ``path`` file, which is relative to vfs root.
53 '''Open ``path`` file, which is relative to vfs root.
367
54
368 Newly created directories are marked as "not to be indexed by
55 Newly created directories are marked as "not to be indexed by
369 the content indexing service", if ``notindexed`` is specified
56 the content indexing service", if ``notindexed`` is specified
370 for "write" mode access.
57 for "write" mode access.
371 '''
58 '''
372 return self.__call__
59 return self.__call__
373
60
374 def read(self, path):
61 def read(self, path):
375 with self(path, 'rb') as fp:
62 with self(path, 'rb') as fp:
376 return fp.read()
63 return fp.read()
377
64
378 def readlines(self, path, mode='rb'):
65 def readlines(self, path, mode='rb'):
379 with self(path, mode=mode) as fp:
66 with self(path, mode=mode) as fp:
380 return fp.readlines()
67 return fp.readlines()
381
68
382 def write(self, path, data, backgroundclose=False):
69 def write(self, path, data, backgroundclose=False):
383 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
70 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
384 return fp.write(data)
71 return fp.write(data)
385
72
386 def writelines(self, path, data, mode='wb', notindexed=False):
73 def writelines(self, path, data, mode='wb', notindexed=False):
387 with self(path, mode=mode, notindexed=notindexed) as fp:
74 with self(path, mode=mode, notindexed=notindexed) as fp:
388 return fp.writelines(data)
75 return fp.writelines(data)
389
76
390 def append(self, path, data):
77 def append(self, path, data):
391 with self(path, 'ab') as fp:
78 with self(path, 'ab') as fp:
392 return fp.write(data)
79 return fp.write(data)
393
80
def basename(self, path):
    """Base element of ``path`` (as os.path.basename would do).

    A method so subclasses may cope with unusual encodings."""
    return os.path.basename(path)

def chmod(self, path, mode):
    """Change permission bits of ``path``, relative to this vfs."""
    return os.chmod(self.join(path), mode)

def dirname(self, path):
    """Directory element of ``path`` (as os.path.dirname would do).

    A method so subclasses may cope with unusual encodings."""
    return os.path.dirname(path)

def exists(self, path=None):
    return os.path.exists(self.join(path))

def fstat(self, fp):
    return util.fstat(fp)

def isdir(self, path=None):
    return os.path.isdir(self.join(path))

def isfile(self, path=None):
    return os.path.isfile(self.join(path))

def islink(self, path=None):
    return os.path.islink(self.join(path))

def isfileorlink(self, path=None):
    '''True when ``path`` is a regular file or a symlink.

    Unlike isfile, this does not follow symlinks.'''
    try:
        st = self.lstat(path)
    except OSError:
        return False
    mode = st.st_mode
    return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

def reljoin(self, *paths):
    """Join path fragments without injecting the vfs base.

    Paths stay relative; a method so subclasses may cope with unusual
    encodings."""
    return os.path.join(*paths)

def split(self, path):
    """Split the top-most element of ``path`` (as os.path.split would do)."""
    return os.path.split(path)
def lexists(self, path=None):
    return os.path.lexists(self.join(path))

def lstat(self, path=None):
    return os.lstat(self.join(path))

def listdir(self, path=None):
    return os.listdir(self.join(path))

def makedir(self, path=None, notindexed=True):
    return util.makedir(self.join(path), notindexed)

def makedirs(self, path=None, mode=None):
    return util.makedirs(self.join(path), mode)

def makelock(self, info, path):
    return util.makelock(info, self.join(path))

def mkdir(self, path=None):
    return os.mkdir(self.join(path))

def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
    """Create a unique temporary file below this vfs.

    Returns ``(fd, name)``; when ``dir`` was given, ``name`` keeps the
    ``dir`` prefix (relative to the vfs root)."""
    fd, absname = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                   dir=self.join(dir), text=text)
    dname, fname = util.split(absname)
    if dir:
        return fd, os.path.join(dir, fname)
    return fd, fname

def readdir(self, path=None, stat=None, skip=None):
    return osutil.listdir(self.join(path), stat, skip)

def readlock(self, path):
    return util.readlock(self.join(path))
def rename(self, src, dst, checkambig=False):
    """Rename ``src`` to ``dst``, both relative to this vfs.

    ``checkambig`` is forwarded to util.filestat and is only useful when
    the destination file is guarded by a lock (e.g. repo.lock or
    repo.wlock): it avoids file-stat ambiguity of the renamed file."""
    srcpath = self.join(src)
    dstpath = self.join(dst)
    oldstat = None
    if checkambig:
        oldstat = util.filestat(dstpath)
    if not (oldstat and oldstat.stat):
        return util.rename(srcpath, dstpath)
    ret = util.rename(srcpath, dstpath)
    newstat = util.filestat(dstpath)
    if newstat.isambig(oldstat):
        # stat of the renamed file is ambiguous to the original one
        newstat.avoidambig(dstpath, oldstat)
    return ret
def readlink(self, path):
    return os.readlink(self.join(path))

def removedirs(self, path=None):
    """Remove a leaf directory and every empty intermediate one."""
    return util.removedirs(self.join(path))
def rmtree(self, path=None, ignore_errors=False, forcibly=False):
    """Recursively remove the directory tree at ``path``.

    With ``forcibly`` this also tries to delete READ-ONLY files, which
    cannot be unlinked under Windows otherwise."""
    onerror = None
    if forcibly:
        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)
    return shutil.rmtree(self.join(path),
                         ignore_errors=ignore_errors, onerror=onerror)
def setflags(self, path, l, x):
    """Set symlink (``l``) and executable (``x``) flags on ``path``."""
    return util.setflags(self.join(path), l, x)

def stat(self, path=None):
    return os.stat(self.join(path))

def unlink(self, path=None):
    return util.unlink(self.join(path))

def unlinkpath(self, path=None, ignoremissing=False):
    return util.unlinkpath(self.join(path), ignoremissing)

def utime(self, path=None, t=None):
    return os.utime(self.join(path), t)
def walk(self, path=None, onerror=None):
    """Yield a (dirpath, dirs, files) tuple for each directory under ``path``.

    ``dirpath`` is relative to the root of this vfs and uses ``os.sep``
    as path separator even if you pass a POSIX-style ``path``.  The vfs
    root itself is reported with an empty ``dirpath``."""
    root = os.path.normpath(self.join(None))
    # When dirpath == root, dirpath[prefixlen:] becomes '' because
    # len(dirpath) < prefixlen (normasprefix appends a separator).
    prefixlen = len(pathutil.normasprefix(root))
    for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
        yield (dirpath[prefixlen:], dirs, files)
@contextlib.contextmanager
def backgroundclosing(self, ui, expectedcount=-1):
    """Allow files opened through this vfs to be closed asynchronously.

    While this context manager is active, ``backgroundclose`` may be
    passed to ``__call__``/``open`` so the file is possibly closed on a
    background thread."""
    # Only one active closer per vfs; an arbitrary restriction that could
    # be lifted if a use case ever shows up.
    vfs = getattr(self, 'vfs', self)
    if getattr(vfs, '_backgroundfilecloser', None):
        raise error.Abort(
            _('can only have 1 active background file closer'))

    with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
        try:
            vfs._backgroundfilecloser = bfc
            yield bfc
        finally:
            vfs._backgroundfilecloser = None
class vfs(abstractvfs):
    '''Operate on files relative to a base directory.

    Hides the details of COW semantics and remote file access from
    higher level code.
    '''

    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode (minus exec-irrelevant bits) to freshly created files
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path``, which is relative to the vfs root.

        Newly created directories are marked "not to be indexed by the
        content indexing service" if ``notindexed`` is set for "write"
        mode access.

        ``backgroundclose`` lets the file be closed asynchronously; it is
        only valid inside a ``self.backgroundclosing()`` context and should
        only be used when writing thousands of files that are each opened
        exactly once for the context's duration (otherwise unflushed data
        could be lost).

        ``checkambig`` is passed on to atomictempfile (writing only) and
        is useful only when the target file is guarded by a lock
        (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b"  # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # An empty basename means the path points at a directory; let
            # the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on
                        # Windows shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2  # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks before writing (COW semantics)
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # filesystem cannot symlink: fall back to a plain file holding
            # the link target
            self.write(dst, src)

    def join(self, path, *insidef):
        if not path:
            return self.base
        return os.path.join(self.base, path, *insidef)

opener = vfs
class auditvfs(object):
    """Base class for vfs wrappers.

    Forwards the ``mustaudit`` and ``options`` attributes to the wrapped
    vfs instance."""

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that rewrites file names through a filter function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))

filteropener = filtervfs
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs that refuses any write access.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''Yield every hg repository under ``path``, always recursively.

    ``recurse`` only controls whether we keep descending into the working
    directory of a repository once it has been found.'''
    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return True when it was not seen before
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, seenstat) for seenstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
def osrcpath():
    '''Return the default OS-specific hgrc search path.'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
# cached result of rcpath(); computed lazily on first use
_rcpath = None

def rcpath():
    '''Return the hgrc search path (computed once, then cached).

    If the HGRCPATH environment variable is set, use it: directories in
    it contribute their ``*.rc`` files, other entries are used verbatim.
    An empty HGRCPATH means only .hg/hgrc of the current repo is read.
    Without HGRCPATH the OS-specific default path is used.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in encoding.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
def intrev(rev):
    """Return an integer for ``rev`` usable in comparison or arithmetic.

    The working directory (``rev is None``) maps to ``wdirrev``."""
    return wdirrev if rev is None else rev
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx (the last of the revset).

    Falls back to ``default`` for an empty spec; 0 counts as a valid spec."""
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec])
    if not resolved:
        raise error.Abort(_('empty revision set'))
    return repo[resolved.last()]
def _pairspec(revspec):
    # True when the top of the parsed revset tree is a range-like operator,
    # i.e. the spec denotes a pair even if both ends resolve identically.
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
def revpair(repo, revs):
    """Resolve ``revs`` into a ``(first, second)`` pair of nodes.

    ``second`` is None when the specs collapse to a single revision and
    no top-level range expression forces a pair.  Raises Abort for an
    empty range or an empty side of a range."""
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if not resolved:
        first = second = None
    elif resolved.isascending():
        first, second = resolved.min(), resolved.max()
    elif resolved.isdescending():
        first, second = resolved.max(), resolved.min()
    else:
        first, second = resolved.first(), resolved.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is a range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
def revrange(repo, specs):
    """Execute one or more revsets and return the union.

    This is the preferred mechanism for executing user-supplied revsets
    (it honors config options such as revset aliases).  The revsets in
    ``specs`` are combined with a chained ``OR``; an empty ``specs``
    yields an empty result.  Integers are treated as revision numbers.

    The revsets are assumed to be pre-formatted; arguments needing
    expansion should go through ``revsetlang.formatspec()`` first.

    Returns a ``revset.abstractsmartset``, a list-like interface over
    integer revisions.
    """
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
def meaningfulparents(repo, ctx):
    """Return the list of meaningful (or, in debug mode, all) parent revs.

    Both parents of a merge (two non-null parents) are meaningful.
    Otherwise the single first parent is meaningful only when it is not
    the immediately preceding revision."""
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        return []
    return parents
def expandpats(pats):
    '''Expand bare globs when running on windows.

    On posix we assume it has already been done by sh.'''
    # Fixed docstring typo: "it already has already been done" -> "it has
    # already been done".
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern: try glob expansion; an invalid glob regex
            # leaves the pattern untouched
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a (matcher, patterns-used) pair.

    The matcher warns about bad matches unless an alternate ``badfn``
    callback is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # matcher matches everything: report no explicit patterns used
        pats = []
    return m, pats
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
def origpath(ui, repo, filepath):
    '''Customize where .orig files are created.

    Reads the user-defined path from the config file
    ([ui] origbackuppath = <path>) and falls back to ``filepath`` + .orig
    when unset.'''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    filepathfromroot = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)

    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing files for removal,
    detecting renames by ``similarity``; recurses into subrepos.

    Returns 1 when any explicitly requested file was rejected or a subrepo
    addremove reported failure, 0 otherwise."""
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only complain for files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
def marktouched(repo, files, similarity=0.0):
    '''Record that *files* (paths relative to the repo root) have been
    operated upon: newly unknown files are added, missing ones removed,
    and likely renames recorded.  Returns 1 if any of the named files was
    rejected by the matcher, 0 otherwise.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # report what is about to happen to each interesting file
        unknownset = set(unknown + forgotten)
        for path in sorted(unknownset | set(deleted)):
            if path in unknownset:
                repo.ui.status(_('adding %s\n') % path)
            else:
                repo.ui.status(_('removing %s\n') % path)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1131
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of paths:
    (added, unknown, deleted, removed, forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # st is the walk's stat result (falsy when the file is absent);
        # dstate is the dirstate status character for the path
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked file that passes the path auditor's validity check
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed, yet present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1160
def _findrenames(repo, matcher, added, removed, similarity):
    '''Map each added file to the removed file it appears renamed from.

    Returns a dict {new_path: old_path} of detected renames; empty when
    rename detection is disabled (similarity <= 0).
    '''
    if similarity <= 0:
        return {}
    renames = {}
    for src, dst, score in similar.findrenames(repo, added, removed,
                                               similarity):
        # exact matches are only announced in verbose mode
        if repo.ui.verbose or not (matcher.exact(src) and matcher.exact(dst)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(src), matcher.rel(dst),
                            score * 100))
        renames[dst] = src
    return renames
1175
def _markchanges(repo, unknown, deleted, renames):
    '''Record addremove's conclusions in the dirstate: files in *unknown*
    become added, files in *deleted* become removed, and each rename target
    is marked as copied from its source.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
1185
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow a copy chain back one step: if src is itself a copy, record
    # the relationship from the original source instead
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # cancel the copy: make sure dst is tracked normally again
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only just added, so there is no committed
            # revision to record copy metadata against; warn and plain-add
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1204
def readrequires(opener, supported):
    '''Read and parse the 'requires' file through *opener*, checking every
    entry against the *supported* feature set.

    Returns the set of requirement names.  Raises RequirementError when
    the file is corrupt or lists features this Mercurial does not know.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for req in requirements:
        if req in supported:
            continue
        # a requirement name must start with an alphanumeric character
        if not req or not req[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(req)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1223
def writerequires(opener, requirements):
    """Persist *requirements* to the 'requires' file, one sorted name per
    line."""
    with opener('requires', 'w') as reqfile:
        for name in sorted(requirements):
            reqfile.write("%s\n" % name)
1228
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    ``changed()`` compares the recorded stat against a fresh one to detect
    replacement of the file; ``refresh()`` re-records the current stat.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once known, None while undetermined
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # could not stat the file: cacheability still unknown
                self._cacheable = None

    def refresh(self):
        # only re-stat when caching can work at all
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is None:
            # not determined yet: optimistically assume it is
            return True
        return self._cacheable

    def changed(self):
        # an uncacheable file must always be treated as changed
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # the first successful stat settles whether caching is reliable
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        return False

    @staticmethod
    def stat(path):
        # a missing file simply yields None; other OS errors propagate
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1283
class filecacheentry(object):
    """Aggregate of filecachesubentry trackers, one per watched path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1300
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative file names whose stat governs cache validity
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator usage: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # a tracked file was replaced: recompute the cached value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # fast path for subsequent accesses until __delete__ is called
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the fast-path copy; the _filecache entry remains so a
        # later __get__ re-validates against the recorded stat
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1379
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run *cmd* through ui.system with *envvar* carrying the inheritance
    token of *lock*, so the child process can take over the held lock.

    Raises LockInheritanceContractViolation when *lock* is None (i.e. not
    currently held).  Returns the subprocess exit code.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as token:
        # expose the lock token to the child via the environment
        environ[envvar] = token
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1389
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1398
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1405
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1411
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # bypass our own __setattr__, which would otherwise forward the
        # assignment to the (not yet recorded) wrapped file handle
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        # delegate all other attribute access to the wrapped file object
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # subclasses must define how closing is hooked
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1437
501
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # the closer object takes over closing duties for the handle
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # hand the handle to the closer instead of closing synchronously
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1452
516
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads.

    Used as a context manager; files handed to close() are closed by a pool
    of worker threads while the manager is active.
    """
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        # first exception raised by a worker, re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = pycompat.osname == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so the loop can notice shutdown promptly
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1543
607
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # record the pre-write stat to compare against at close time;
        # object.__setattr__ bypasses the base class's attribute forwarding
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        oldstat = self._oldstat
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                newstat.avoidambig(self._origfh.name, oldstat)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now