##// END OF EJS Templates
auditvfs: forward options property from nested vfs...
Augie Fackler -
r29714:69109052 default
parent child Browse files
Show More
@@ -1,1423 +1,1431 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
35 if os.name == 'nt':
35 if os.name == 'nt':
36 from . import scmwindows as scmplatform
36 from . import scmwindows as scmplatform
37 else:
37 else:
38 from . import scmposix as scmplatform
38 from . import scmposix as scmplatform
39
39
40 systemrcpath = scmplatform.systemrcpath
40 systemrcpath = scmplatform.systemrcpath
41 userrcpath = scmplatform.userrcpath
41 userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        # self is a 7-tuple, which matches the seven %r conversions below
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs: first every subrepo present in ctx1
    (sorted by subpath), then an empty subrepo for each path only in ctx2.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # items() instead of the Python-2-only iteritems(): sorted() copies the
    # pairs either way, so this is behavior-identical and also works on py3
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in (excluded or []):
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
        return
    ui.status(_("no changes found (ignored %d secret changesets)\n")
              % len(secretlist))
141
141
def checknewlabel(repo, lbl, kind):
    """Abort when lbl is not usable as a new label name.

    Note: the "kind" parameter is deliberately kept out of ui output
    because it makes strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        # not an integer: acceptable
        return
    raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(banned in f for banned in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config('ui', 'portablefilenames', 'warn')
    lowered = raw.lower()
    parsed = util.parsebool(raw)
    # on Windows non-portable names cannot exist at all, so always abort there
    abort = lowered == 'abort' or os.name == 'nt'
    warn = parsed or lowered == 'warn'
    unrecognized = not (warn or abort or lowered == 'ignore')
    if parsed is None and unrecognized:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % raw)
    return abort, warn
185
185
class casecollisionauditor(object):
    '''Detect new filenames that collide case-insensitively with tracked ones.

    Call the instance with each candidate filename; it warns (or aborts,
    depending on the ``abort`` flag) on a possible case-folding collision.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case every tracked filename in one pass over a joined string
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (up to and
    including maxrev) and returns that SHA-1 digest, or None when no
    revision is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # hashlib.update() requires bytes; encoding explicitly keeps
            # this working on Python 3 and is byte-identical on Python 2
            s.update(('%s;' % rev).encode('ascii'))
        key = s.digest()
    return key
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Concrete subclasses must provide ``join(path)`` (map a vfs-relative
    path to a real filesystem path) and ``__call__`` (open a file); the
    helpers below are all expressed in terms of those two operations.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only swallow "file not found"; re-raise any other I/O error
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # only swallow "file not found"; re-raise any other I/O error
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # cache the bound __call__ on the instance so later open() calls
        # dispatch to it directly instead of re-entering this shim
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        '''return the full contents of ``path`` opened in binary mode'''
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        '''return the lines of ``path`` as a list'''
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        '''write ``data`` to ``path``, truncating any existing content'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''write the sequence of lines ``data`` to ``path``'''
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        '''append ``data`` to the end of ``path``'''
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        '''change the permission bits of ``path``'''
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        '''whether ``path`` (or the vfs root when None) exists'''
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        '''stat the open file object ``fp``'''
        return util.fstat(fp)

    def isdir(self, path=None):
        '''whether ``path`` (or the vfs root when None) is a directory'''
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        '''whether ``path`` is a regular file (follows symlinks)'''
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        '''whether ``path`` is a symbolic link'''
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            # missing or inaccessible paths count as "neither"
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        '''like exists(), but does not follow a trailing symlink'''
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        '''stat ``path`` without following a trailing symlink'''
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        '''list the entries of directory ``path`` (vfs root when None)'''
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        '''create directory ``path``; by default marked not-indexed'''
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        '''recursively create ``path`` and any missing parents'''
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        '''create a lock file at ``path`` containing ``info``'''
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        '''create a single directory ``path`` (parents must exist)'''
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''create a temporary file under the vfs

        Returns (fd, name) where name is relative to ``dir`` when given,
        otherwise just the bare filename.
        '''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        '''list directory ``path`` via osutil.listdir (optionally with stat)'''
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        '''read the contents of the lock file at ``path``'''
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        '''return the target of the symlink at ``path``'''
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        '''set the symlink (l) and executable (x) flags on ``path``'''
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        '''stat ``path`` (follows symlinks)'''
        return os.stat(self.join(path))

    def unlink(self, path=None):
        '''remove the file at ``path``'''
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        '''remove ``path``; missing files are ignored when requested'''
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        '''set the access/modification times of ``path`` to ``t``'''
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always clear the slot so a later backgroundclosing()
                # context can be entered
                vfs._backgroundfilecloser = None
480
480
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # optionally normalize the base: ~/env expansion, then symlink
        # resolution, before it is frozen into self.base
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        # setting mustaudit installs either a real path auditor or a no-op
        self.mustaudit = audit
        # file mode forced onto newly created files (None = leave to umask)
        self.createmode = None
        # lazily learned: whether nlink counts can be trusted on this fs
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        # keep the flag and the auditor callable in sync: auditing on means
        # every path is checked against self.base; off means always-true
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        # cached: does the filesystem under base support symlinks?
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # cached: does the filesystem under base honor the exec bit?
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode (minus any exec-ish bits above 0o666) to a file
        # we just created; skipped when no mode policy or chmod is futile
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            # reject OS-reserved filenames before touching the filesystem
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink stays -1 for plain reads; for writes it records the link
        # count so we can break hardlinks (COW) and fix modes when needed
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        # truncating write: drop the old file so hardlinked
                        # copies are not clobbered through the shared inode
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    # file does not exist yet: it will be newly created below
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink by replacing f with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # brand-new file: apply the configured creation mode
            self._fixfilemode(f)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        # audit the destination, then best-effort remove any stale link
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: fall back to a regular file whose
            # content is the link target
            self.write(dst, src)

    def join(self, path, *insidef):
        # empty path means the vfs root itself
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
626
626
# legacy alias kept for backwards compatibility with older callers
opener = vfs
628
628
class auditvfs(object):
    '''Base for vfs wrappers that delegate audit state to an inner vfs.

    The wrapped instance is stored in ``self.vfs``; the ``mustaudit``
    and ``options`` attributes are forwarded to it so configuring the
    wrapper and configuring the wrapped vfs are interchangeable.
    '''

    def __init__(self, vfs):
        # the wrapped (inner) vfs instance
        self.vfs = vfs

    # auditing toggle is owned by the wrapped vfs
    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, value):
        self.vfs.mustaudit = value

    # options also live on the wrapped vfs
    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
648
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that rewrites every path through a filter function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before delegation
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path, *insidef):
        # the root itself is never filtered
        if not path:
            return self.vfs.join(path)
        combined = self.vfs.reljoin(path, *insidef)
        return self.vfs.join(self._filter(combined))
656
664
# legacy alias kept for backwards compatibility with older callers
filteropener = filtervfs
658
666
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through to the wrapped vfs
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort(_('this vfs is read only'))

    def join(self, path, *insidef):
        # reads need no path rewriting; delegate joining untouched
        return self.vfs.join(path, *insidef)
672
680
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the top-level path abort the walk
        if err.filename == path:
            raise err
    # samestat is needed to detect symlink cycles; not available everywhere
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True only if it was
            # not already seen (i.e. it is safe to descend into)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so refuse to follow
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        # sort for deterministic yield order
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            # mutate dirs in place so os.walk (topdown) honors the pruning;
            # symlinked dirs are walked by a recursive call instead
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
720
728
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    rcs = []
    # bundled defaults first, so later entries can override them
    if os.path.isdir(defaultpath):
        rcs.extend(os.path.join(defaultpath, f)
                   for f, kind in osutil.listdir(defaultpath)
                   if f.endswith('.rc'))
    rcs.extend(systemrcpath())
    rcs.extend(userrcpath())
    return [os.path.normpath(f) for f in rcs]
733
741
# memoized result of rcpath(); stays None until the first call computes it
_rcpath = None
735
743
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # already computed once; reuse the cached list
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    # HGRCPATH replaces the default search path entirely
    _rcpath = []
    for entry in os.environ['HGRCPATH'].split(os.pathsep):
        if not entry:
            continue
        entry = util.expandpath(entry)
        if os.path.isdir(entry):
            # a directory contributes every *.rc file it contains
            _rcpath.extend(os.path.join(entry, f)
                           for f, kind in osutil.listdir(entry)
                           if f.endswith('.rc'))
        else:
            _rcpath.append(entry)
    return _rcpath
759
767
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None stands for the working directory; map it to its pseudo revnum
    return wdirrev if rev is None else rev
766
774
def revsingle(repo, revspec, default='.'):
    '''Resolve a single revspec to a changectx, or ``default`` when the
    spec is empty (0 still counts as a real revision number).'''
    if revspec or revspec == 0:
        matches = revrange(repo, [revspec])
        if not matches:
            raise error.Abort(_('empty revision set'))
        return repo[matches.last()]
    return repo[default]
775
783
def _pairspec(revspec):
    '''True when the parsed revspec is a top-level range expression.'''
    parsed = revset.parse(revspec)
    parsed = revset.optimize(parsed) # fix up "x^:y" -> "(x^):y"
    return parsed and parsed[0] in ('range', 'rangepre', 'rangepost',
                                    'rangeall')
780
788
def revpair(repo, revs):
    '''Resolve a list of revspecs to a (node, node-or-None) pair.

    An empty ``revs`` yields (first parent of the working dir, None).
    '''
    if not revs:
        return repo.dirstate.p1(), None

    spanned = revrange(repo, revs)

    # pick the endpoints in user-meaningful order; ordered smartsets let
    # us use the cheap min/max accessors
    first = second = None
    if spanned:
        if spanned.isascending():
            first, second = spanned.min(), spanned.max()
        elif spanned.isdescending():
            first, second = spanned.max(), spanned.min()
        else:
            first, second = spanned.first(), spanned.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
810
818
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # integers are wrapped as rev(N) so they survive revset parsing
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    matcher = revset.matchany(repo.ui, allspecs, repo)
    return matcher(repo)
839
847
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a merge: both parents matter
        return parents
    if repo.ui.debugflag:
        # debug mode always shows both slots, padding with null
        return [parents[0], repo['null']]
    onlyparent = parents[0]
    if onlyparent.rev() >= intrev(ctx.rev()) - 1:
        # immediately preceding revision: not worth mentioning
        return []
    return parents
855
863
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind ("glob:", "re:", ...): leave untouched
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # glob matched nothing: keep the literal pattern
            expanded.append(kindpat)
    return expanded
874
882
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    # ("",) is how an empty pattern list arrives from the command line
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # expand bare globs (no-op on posix, see expandpats)
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: closes over 'm', which is only bound below; safe because the
        # matcher invokes badfn after construction, not during it
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # match-everything matcher: report no explicit patterns
        pats = []
    return m, pats
899
907
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats(), but callers only want the matcher half
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
904
912
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # always-matchers let walkers skip per-file pattern tests entirely
    return matchmod.always(repo.root, repo.getcwd())
908
916
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.

    ``badfn``, if given, is passed through to the exact matcher as the
    callback for paths that cannot be matched.
    '''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
912
920
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backupdir = ui.config('ui', 'origbackuppath', None)
    if backupdir is None:
        # default: backup sits next to the original file
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured directory
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backupdir, relpath)

    targetdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(targetdir):
        ui.note(_('creating directory: %s\n') % targetdir)
        util.makedirs(targetdir)

    return fullorigpath + ".orig"
932
940
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Schedule addition of untracked files and removal of missing ones,
    recursing into subrepos that the matcher covers (or all of them when
    opts['subrepos'] is set).

    ``dry_run`` and ``similarity`` fall back to the corresponding keys in
    ``opts``.  Returns 1 if any explicitly requested file was rejected,
    otherwise the accumulated subrepo result (0 on full success).
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # True when the matcher names the subrepo itself or a path inside
        # it.  A bare f.startswith(subpath) would also match sibling paths
        # that merely share the prefix (e.g. 'subX' for subrepo 'sub') and
        # wrongly recurse into the subrepo, so require an exact match or a
        # '/'-delimited prefix.
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f == subpath or f.startswith(subpath + '/'):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # warn only for files the user named explicitly; always record
        # the rejection so the final return value can reflect it
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly named file that was rejected is a hard failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
996
1004
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset | set(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a requested file that could not be matched is a hard failure
    if any(f in m.files() for f in rejected):
        return 1
    return 0
1025
1033
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed, forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    # used to filter out paths that escape the repo (e.g. via symlinks)
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dstate is the dirstate code for the path: '?' untracked,
        # 'r' removed, 'a' added; st is the walk result for the path,
        # presumably falsy when the file is absent on disk — confirm
        # against dirstate.walk before relying on it
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1054
1062
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # rename detection disabled
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        # forget removals first, then record additions and copy sources
        wctx.forget(deleted)
        wctx.add(unknown)
        for newfile, oldfile in renames.iteritems():
            wctx.copy(oldfile, newfile)
1079
1087
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return

    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # the source was only just added: there is no committed data to
        # record a copy from, so only add the destination
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1098
1106
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an entry that does not even look like a feature name means the
        # file itself is damaged
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1117
1125
def writerequires(opener, requirements):
    """Write the requirements, one per line and sorted, via opener."""
    with opener('requires', 'w') as fp:
        fp.write(''.join('%s\n' % r for r in sorted(requirements)))
1122
1130
class filecachesubentry(object):
    """Tracks stat information for one path backing a filecache entry."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means we don't know yet whether the path is cacheable
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # re-stat only if caching can work for this path
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # while unknown, optimistically assume the path is cacheable
        return True if self._cacheable is None else self._cacheable

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if it does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1177
1185
class filecacheentry(object):
    """Aggregates several filecachesubentry objects into one cache entry."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1194
1202
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths of the files backing the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator application: remember the wrapped function and the
        # attribute name it is cached under
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # fast path: a value already materialized in the instance __dict__
        # is trusted until explicitly invalidated (see __delete__)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # stat info exists from an earlier call; recompute the value
            # only if any backing file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # invalidate the materialized value; the next __get__ re-checks
        # the backing files
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1273
1281
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd through ui.system with lock's inheritance token in envvar.

    The lock must currently be held; otherwise inheriting it is a contract
    violation.  Returns the exit code of the subprocess."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as token:
        environ[envvar] = token
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1283
1291
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1292
1300
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1299
1307
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    optimise = ui.configbool('format', 'generaldelta', False)
    return optimise
1305
1313
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Attribute access is forwarded to the wrapped file; only close() (and
    leaving a with-block) is intercepted and handed to the closer, which
    decides when the real close happens.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        # go through object.__setattr__ because our own __setattr__
        # forwards everything to the wrapped file
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        # enter the wrapped file, not the proxy
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    def close(self):
        self._closer.close(self._origfh)
1333
1341
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # _running: worker threads should keep polling the queue
        # _entered: the context manager is active; close() requires this
        self._running = False
        self._entered = False
        self._threads = []
        # first exception raised by a worker; re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        # bounded queue of file handles awaiting close; put() in close()
        # blocks once maxqueue handles are pending
        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so a stopped closer is noticed promptly
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                # queue drained; exit only once the closer has been stopped
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
General Comments 0
You need to be logged in to leave comments. Login now