vfs: make atomictempfile avoid ambiguity of file stat if needed...
FUJIWARA Katsunori
r29202:76f1ea36 default
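The "ambiguity of file stat" in the title refers to validation by stat data: caches are often revalidated by comparing a file's size and mtime, and if a file is replaced within the same second and ends up the same size, its stat data does not change even though its content did, so readers keep trusting stale caches. This change threads a new checkambig flag through vfs.__call__ into util.atomictempfile so that such writes can detect and break the tie. Below is a minimal sketch of the idea, assuming a hypothetical write_atomic helper; it is an illustration of the mechanism, not Mercurial's actual implementation.

    import os

    def write_atomic(path, data, checkambig=False):
        # Hypothetical sketch: write to a temporary file, then rename it
        # over the target (an atomic replacement on POSIX).
        tmp = path + '.tmp'
        try:
            old = os.stat(path)  # stat of the file being replaced, if any
        except OSError:
            old = None
        with open(tmp, 'wb') as f:
            f.write(data)
        os.rename(tmp, path)
        if checkambig and old is not None:
            new = os.stat(path)
            # Same size and same whole-second mtime as before: stat-based
            # cache validation could not see this change, so nudge the
            # mtime forward (kept within a signed 32-bit range).
            if (new.st_size == old.st_size and
                    int(new.st_mtime) == int(old.st_mtime)):
                advanced = (int(old.st_mtime) + 1) & 0x7fffffff
                os.utime(path, (advanced, advanced))

The flag defaults to False in the diff below, so only writers that opt in pay the extra stat/utime cost; all other callers of the vfs behave exactly as before.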
@@ -1,1378 +1,1381 @@
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import os
13 import os
14 import re
14 import re
15 import shutil
15 import shutil
16 import stat
16 import stat
17 import tempfile
17 import tempfile
18 import threading
18 import threading
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import wdirrev
21 from .node import wdirrev
22 from . import (
22 from . import (
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 osutil,
26 osutil,
27 pathutil,
27 pathutil,
28 phases,
28 phases,
29 revset,
29 revset,
30 similar,
30 similar,
31 util,
31 util,
32 )
32 )
33
33
34 if os.name == 'nt':
34 if os.name == 'nt':
35 from . import scmwindows as scmplatform
35 from . import scmwindows as scmplatform
36 else:
36 else:
37 from . import scmposix as scmplatform
37 from . import scmposix as scmplatform
38
38
39 systemrcpath = scmplatform.systemrcpath
39 systemrcpath = scmplatform.systemrcpath
40 userrcpath = scmplatform.userrcpath
40 userrcpath = scmplatform.userrcpath
41
41
42 class status(tuple):
42 class status(tuple):
43 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
43 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
44 and 'ignored' properties are only relevant to the working copy.
44 and 'ignored' properties are only relevant to the working copy.
45 '''
45 '''
46
46
47 __slots__ = ()
47 __slots__ = ()
48
48
49 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
49 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
50 clean):
50 clean):
51 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
51 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
52 ignored, clean))
52 ignored, clean))
53
53
54 @property
54 @property
55 def modified(self):
55 def modified(self):
56 '''files that have been modified'''
56 '''files that have been modified'''
57 return self[0]
57 return self[0]
58
58
59 @property
59 @property
60 def added(self):
60 def added(self):
61 '''files that have been added'''
61 '''files that have been added'''
62 return self[1]
62 return self[1]
63
63
64 @property
64 @property
65 def removed(self):
65 def removed(self):
66 '''files that have been removed'''
66 '''files that have been removed'''
67 return self[2]
67 return self[2]
68
68
69 @property
69 @property
70 def deleted(self):
70 def deleted(self):
71 '''files that are in the dirstate, but have been deleted from the
71 '''files that are in the dirstate, but have been deleted from the
72 working copy (aka "missing")
72 working copy (aka "missing")
73 '''
73 '''
74 return self[3]
74 return self[3]
75
75
76 @property
76 @property
77 def unknown(self):
77 def unknown(self):
78 '''files not in the dirstate that are not ignored'''
78 '''files not in the dirstate that are not ignored'''
79 return self[4]
79 return self[4]
80
80
81 @property
81 @property
82 def ignored(self):
82 def ignored(self):
83 '''files not in the dirstate that are ignored (by _dirignore())'''
83 '''files not in the dirstate that are ignored (by _dirignore())'''
84 return self[5]
84 return self[5]
85
85
86 @property
86 @property
87 def clean(self):
87 def clean(self):
88 '''files that have not been modified'''
88 '''files that have not been modified'''
89 return self[6]
89 return self[6]
90
90
91 def __repr__(self, *args, **kwargs):
91 def __repr__(self, *args, **kwargs):
92 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
92 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
93 'unknown=%r, ignored=%r, clean=%r>') % self)
93 'unknown=%r, ignored=%r, clean=%r>') % self)
94
94
95 def itersubrepos(ctx1, ctx2):
95 def itersubrepos(ctx1, ctx2):
96 """find subrepos in ctx1 or ctx2"""
96 """find subrepos in ctx1 or ctx2"""
97 # Create a (subpath, ctx) mapping where we prefer subpaths from
97 # Create a (subpath, ctx) mapping where we prefer subpaths from
98 # ctx1. The subpaths from ctx2 are important when the .hgsub file
98 # ctx1. The subpaths from ctx2 are important when the .hgsub file
99 # has been modified (in ctx2) but not yet committed (in ctx1).
99 # has been modified (in ctx2) but not yet committed (in ctx1).
100 subpaths = dict.fromkeys(ctx2.substate, ctx2)
100 subpaths = dict.fromkeys(ctx2.substate, ctx2)
101 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
101 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
102
102
103 missing = set()
103 missing = set()
104
104
105 for subpath in ctx2.substate:
105 for subpath in ctx2.substate:
106 if subpath not in ctx1.substate:
106 if subpath not in ctx1.substate:
107 del subpaths[subpath]
107 del subpaths[subpath]
108 missing.add(subpath)
108 missing.add(subpath)
109
109
110 for subpath, ctx in sorted(subpaths.iteritems()):
110 for subpath, ctx in sorted(subpaths.iteritems()):
111 yield subpath, ctx.sub(subpath)
111 yield subpath, ctx.sub(subpath)
112
112
113 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
113 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
114 # status and diff will have an accurate result when it does
114 # status and diff will have an accurate result when it does
115 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
115 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
116 # against itself.
116 # against itself.
117 for subpath in missing:
117 for subpath in missing:
118 yield subpath, ctx2.nullsub(subpath, ctx1)
118 yield subpath, ctx2.nullsub(subpath, ctx1)
119
119
120 def nochangesfound(ui, repo, excluded=None):
120 def nochangesfound(ui, repo, excluded=None):
121 '''Report no changes for push/pull, excluded is None or a list of
121 '''Report no changes for push/pull, excluded is None or a list of
122 nodes excluded from the push/pull.
122 nodes excluded from the push/pull.
123 '''
123 '''
124 secretlist = []
124 secretlist = []
125 if excluded:
125 if excluded:
126 for n in excluded:
126 for n in excluded:
127 if n not in repo:
127 if n not in repo:
128 # discovery should not have included the filtered revision,
128 # discovery should not have included the filtered revision,
129 # we have to explicitly exclude it until discovery is cleanup.
129 # we have to explicitly exclude it until discovery is cleanup.
130 continue
130 continue
131 ctx = repo[n]
131 ctx = repo[n]
132 if ctx.phase() >= phases.secret and not ctx.extinct():
132 if ctx.phase() >= phases.secret and not ctx.extinct():
133 secretlist.append(n)
133 secretlist.append(n)
134
134
135 if secretlist:
135 if secretlist:
136 ui.status(_("no changes found (ignored %d secret changesets)\n")
136 ui.status(_("no changes found (ignored %d secret changesets)\n")
137 % len(secretlist))
137 % len(secretlist))
138 else:
138 else:
139 ui.status(_("no changes found\n"))
139 ui.status(_("no changes found\n"))
140
140
141 def checknewlabel(repo, lbl, kind):
141 def checknewlabel(repo, lbl, kind):
142 # Do not use the "kind" parameter in ui output.
142 # Do not use the "kind" parameter in ui output.
143 # It makes strings difficult to translate.
143 # It makes strings difficult to translate.
144 if lbl in ['tip', '.', 'null']:
144 if lbl in ['tip', '.', 'null']:
145 raise error.Abort(_("the name '%s' is reserved") % lbl)
145 raise error.Abort(_("the name '%s' is reserved") % lbl)
146 for c in (':', '\0', '\n', '\r'):
146 for c in (':', '\0', '\n', '\r'):
147 if c in lbl:
147 if c in lbl:
148 raise error.Abort(_("%r cannot be used in a name") % c)
148 raise error.Abort(_("%r cannot be used in a name") % c)
149 try:
149 try:
150 int(lbl)
150 int(lbl)
151 raise error.Abort(_("cannot use an integer as a name"))
151 raise error.Abort(_("cannot use an integer as a name"))
152 except ValueError:
152 except ValueError:
153 pass
153 pass
154
154
155 def checkfilename(f):
155 def checkfilename(f):
156 '''Check that the filename f is an acceptable filename for a tracked file'''
156 '''Check that the filename f is an acceptable filename for a tracked file'''
157 if '\r' in f or '\n' in f:
157 if '\r' in f or '\n' in f:
158 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
158 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
159
159
160 def checkportable(ui, f):
160 def checkportable(ui, f):
161 '''Check if filename f is portable and warn or abort depending on config'''
161 '''Check if filename f is portable and warn or abort depending on config'''
162 checkfilename(f)
162 checkfilename(f)
163 abort, warn = checkportabilityalert(ui)
163 abort, warn = checkportabilityalert(ui)
164 if abort or warn:
164 if abort or warn:
165 msg = util.checkwinfilename(f)
165 msg = util.checkwinfilename(f)
166 if msg:
166 if msg:
167 msg = "%s: %r" % (msg, f)
167 msg = "%s: %r" % (msg, f)
168 if abort:
168 if abort:
169 raise error.Abort(msg)
169 raise error.Abort(msg)
170 ui.warn(_("warning: %s\n") % msg)
170 ui.warn(_("warning: %s\n") % msg)
171
171
172 def checkportabilityalert(ui):
172 def checkportabilityalert(ui):
173 '''check if the user's config requests nothing, a warning, or abort for
173 '''check if the user's config requests nothing, a warning, or abort for
174 non-portable filenames'''
174 non-portable filenames'''
175 val = ui.config('ui', 'portablefilenames', 'warn')
175 val = ui.config('ui', 'portablefilenames', 'warn')
176 lval = val.lower()
176 lval = val.lower()
177 bval = util.parsebool(val)
177 bval = util.parsebool(val)
178 abort = os.name == 'nt' or lval == 'abort'
178 abort = os.name == 'nt' or lval == 'abort'
179 warn = bval or lval == 'warn'
179 warn = bval or lval == 'warn'
180 if bval is None and not (warn or abort or lval == 'ignore'):
180 if bval is None and not (warn or abort or lval == 'ignore'):
181 raise error.ConfigError(
181 raise error.ConfigError(
182 _("ui.portablefilenames value is invalid ('%s')") % val)
182 _("ui.portablefilenames value is invalid ('%s')") % val)
183 return abort, warn
183 return abort, warn
184
184
185 class casecollisionauditor(object):
185 class casecollisionauditor(object):
186 def __init__(self, ui, abort, dirstate):
186 def __init__(self, ui, abort, dirstate):
187 self._ui = ui
187 self._ui = ui
188 self._abort = abort
188 self._abort = abort
189 allfiles = '\0'.join(dirstate._map)
189 allfiles = '\0'.join(dirstate._map)
190 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
190 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
191 self._dirstate = dirstate
191 self._dirstate = dirstate
192 # The purpose of _newfiles is so that we don't complain about
192 # The purpose of _newfiles is so that we don't complain about
193 # case collisions if someone were to call this object with the
193 # case collisions if someone were to call this object with the
194 # same filename twice.
194 # same filename twice.
195 self._newfiles = set()
195 self._newfiles = set()
196
196
197 def __call__(self, f):
197 def __call__(self, f):
198 if f in self._newfiles:
198 if f in self._newfiles:
199 return
199 return
200 fl = encoding.lower(f)
200 fl = encoding.lower(f)
201 if fl in self._loweredfiles and f not in self._dirstate:
201 if fl in self._loweredfiles and f not in self._dirstate:
202 msg = _('possible case-folding collision for %s') % f
202 msg = _('possible case-folding collision for %s') % f
203 if self._abort:
203 if self._abort:
204 raise error.Abort(msg)
204 raise error.Abort(msg)
205 self._ui.warn(_("warning: %s\n") % msg)
205 self._ui.warn(_("warning: %s\n") % msg)
206 self._loweredfiles.add(fl)
206 self._loweredfiles.add(fl)
207 self._newfiles.add(f)
207 self._newfiles.add(f)
208
208
209 def filteredhash(repo, maxrev):
209 def filteredhash(repo, maxrev):
210 """build hash of filtered revisions in the current repoview.
210 """build hash of filtered revisions in the current repoview.
211
211
212 Multiple caches perform up-to-date validation by checking that the
212 Multiple caches perform up-to-date validation by checking that the
213 tiprev and tipnode stored in the cache file match the current repository.
213 tiprev and tipnode stored in the cache file match the current repository.
214 However, this is not sufficient for validating repoviews because the set
214 However, this is not sufficient for validating repoviews because the set
215 of revisions in the view may change without the repository tiprev and
215 of revisions in the view may change without the repository tiprev and
216 tipnode changing.
216 tipnode changing.
217
217
218 This function hashes all the revs filtered from the view and returns
218 This function hashes all the revs filtered from the view and returns
219 that SHA-1 digest.
219 that SHA-1 digest.
220 """
220 """
221 cl = repo.changelog
221 cl = repo.changelog
222 if not cl.filteredrevs:
222 if not cl.filteredrevs:
223 return None
223 return None
224 key = None
224 key = None
225 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
225 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
226 if revs:
226 if revs:
227 s = util.sha1()
227 s = util.sha1()
228 for rev in revs:
228 for rev in revs:
229 s.update('%s;' % rev)
229 s.update('%s;' % rev)
230 key = s.digest()
230 key = s.digest()
231 return key
231 return key
232
232
233 class abstractvfs(object):
233 class abstractvfs(object):
234 """Abstract base class; cannot be instantiated"""
234 """Abstract base class; cannot be instantiated"""
235
235
236 def __init__(self, *args, **kwargs):
236 def __init__(self, *args, **kwargs):
237 '''Prevent instantiation; don't call this from subclasses.'''
237 '''Prevent instantiation; don't call this from subclasses.'''
238 raise NotImplementedError('attempted instantiating ' + str(type(self)))
238 raise NotImplementedError('attempted instantiating ' + str(type(self)))
239
239
240 def tryread(self, path):
240 def tryread(self, path):
241 '''gracefully return an empty string for missing files'''
241 '''gracefully return an empty string for missing files'''
242 try:
242 try:
243 return self.read(path)
243 return self.read(path)
244 except IOError as inst:
244 except IOError as inst:
245 if inst.errno != errno.ENOENT:
245 if inst.errno != errno.ENOENT:
246 raise
246 raise
247 return ""
247 return ""
248
248
249 def tryreadlines(self, path, mode='rb'):
249 def tryreadlines(self, path, mode='rb'):
250 '''gracefully return an empty array for missing files'''
250 '''gracefully return an empty array for missing files'''
251 try:
251 try:
252 return self.readlines(path, mode=mode)
252 return self.readlines(path, mode=mode)
253 except IOError as inst:
253 except IOError as inst:
254 if inst.errno != errno.ENOENT:
254 if inst.errno != errno.ENOENT:
255 raise
255 raise
256 return []
256 return []
257
257
258 def open(self, path, mode="r", text=False, atomictemp=False,
258 def open(self, path, mode="r", text=False, atomictemp=False,
259 notindexed=False, backgroundclose=False):
259 notindexed=False, backgroundclose=False):
260 '''Open ``path`` file, which is relative to vfs root.
260 '''Open ``path`` file, which is relative to vfs root.
261
261
262 Newly created directories are marked as "not to be indexed by
262 Newly created directories are marked as "not to be indexed by
263 the content indexing service", if ``notindexed`` is specified
263 the content indexing service", if ``notindexed`` is specified
264 for "write" mode access.
264 for "write" mode access.
265 '''
265 '''
266 self.open = self.__call__
266 self.open = self.__call__
267 return self.__call__(path, mode, text, atomictemp, notindexed,
267 return self.__call__(path, mode, text, atomictemp, notindexed,
268 backgroundclose=backgroundclose)
268 backgroundclose=backgroundclose)
269
269
270 def read(self, path):
270 def read(self, path):
271 with self(path, 'rb') as fp:
271 with self(path, 'rb') as fp:
272 return fp.read()
272 return fp.read()
273
273
274 def readlines(self, path, mode='rb'):
274 def readlines(self, path, mode='rb'):
275 with self(path, mode=mode) as fp:
275 with self(path, mode=mode) as fp:
276 return fp.readlines()
276 return fp.readlines()
277
277
278 def write(self, path, data, backgroundclose=False):
278 def write(self, path, data, backgroundclose=False):
279 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
279 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
280 return fp.write(data)
280 return fp.write(data)
281
281
282 def writelines(self, path, data, mode='wb', notindexed=False):
282 def writelines(self, path, data, mode='wb', notindexed=False):
283 with self(path, mode=mode, notindexed=notindexed) as fp:
283 with self(path, mode=mode, notindexed=notindexed) as fp:
284 return fp.writelines(data)
284 return fp.writelines(data)
285
285
286 def append(self, path, data):
286 def append(self, path, data):
287 with self(path, 'ab') as fp:
287 with self(path, 'ab') as fp:
288 return fp.write(data)
288 return fp.write(data)
289
289
290 def basename(self, path):
290 def basename(self, path):
291 """return base element of a path (as os.path.basename would do)
291 """return base element of a path (as os.path.basename would do)
292
292
293 This exists to allow handling of strange encoding if needed."""
293 This exists to allow handling of strange encoding if needed."""
294 return os.path.basename(path)
294 return os.path.basename(path)
295
295
296 def chmod(self, path, mode):
296 def chmod(self, path, mode):
297 return os.chmod(self.join(path), mode)
297 return os.chmod(self.join(path), mode)
298
298
299 def dirname(self, path):
299 def dirname(self, path):
300 """return dirname element of a path (as os.path.dirname would do)
300 """return dirname element of a path (as os.path.dirname would do)
301
301
302 This exists to allow handling of strange encoding if needed."""
302 This exists to allow handling of strange encoding if needed."""
303 return os.path.dirname(path)
303 return os.path.dirname(path)
304
304
305 def exists(self, path=None):
305 def exists(self, path=None):
306 return os.path.exists(self.join(path))
306 return os.path.exists(self.join(path))
307
307
308 def fstat(self, fp):
308 def fstat(self, fp):
309 return util.fstat(fp)
309 return util.fstat(fp)
310
310
311 def isdir(self, path=None):
311 def isdir(self, path=None):
312 return os.path.isdir(self.join(path))
312 return os.path.isdir(self.join(path))
313
313
314 def isfile(self, path=None):
314 def isfile(self, path=None):
315 return os.path.isfile(self.join(path))
315 return os.path.isfile(self.join(path))
316
316
317 def islink(self, path=None):
317 def islink(self, path=None):
318 return os.path.islink(self.join(path))
318 return os.path.islink(self.join(path))
319
319
320 def isfileorlink(self, path=None):
320 def isfileorlink(self, path=None):
321 '''return whether path is a regular file or a symlink
321 '''return whether path is a regular file or a symlink
322
322
323 Unlike isfile, this doesn't follow symlinks.'''
323 Unlike isfile, this doesn't follow symlinks.'''
324 try:
324 try:
325 st = self.lstat(path)
325 st = self.lstat(path)
326 except OSError:
326 except OSError:
327 return False
327 return False
328 mode = st.st_mode
328 mode = st.st_mode
329 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
329 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
330
330
331 def reljoin(self, *paths):
331 def reljoin(self, *paths):
332 """join various elements of a path together (as os.path.join would do)
332 """join various elements of a path together (as os.path.join would do)
333
333
334 The vfs base is not injected so that path stay relative. This exists
334 The vfs base is not injected so that path stay relative. This exists
335 to allow handling of strange encoding if needed."""
335 to allow handling of strange encoding if needed."""
336 return os.path.join(*paths)
336 return os.path.join(*paths)
337
337
338 def split(self, path):
338 def split(self, path):
339 """split top-most element of a path (as os.path.split would do)
339 """split top-most element of a path (as os.path.split would do)
340
340
341 This exists to allow handling of strange encoding if needed."""
341 This exists to allow handling of strange encoding if needed."""
342 return os.path.split(path)
342 return os.path.split(path)
343
343
344 def lexists(self, path=None):
344 def lexists(self, path=None):
345 return os.path.lexists(self.join(path))
345 return os.path.lexists(self.join(path))
346
346
347 def lstat(self, path=None):
347 def lstat(self, path=None):
348 return os.lstat(self.join(path))
348 return os.lstat(self.join(path))
349
349
350 def listdir(self, path=None):
350 def listdir(self, path=None):
351 return os.listdir(self.join(path))
351 return os.listdir(self.join(path))
352
352
353 def makedir(self, path=None, notindexed=True):
353 def makedir(self, path=None, notindexed=True):
354 return util.makedir(self.join(path), notindexed)
354 return util.makedir(self.join(path), notindexed)
355
355
356 def makedirs(self, path=None, mode=None):
356 def makedirs(self, path=None, mode=None):
357 return util.makedirs(self.join(path), mode)
357 return util.makedirs(self.join(path), mode)
358
358
359 def makelock(self, info, path):
359 def makelock(self, info, path):
360 return util.makelock(info, self.join(path))
360 return util.makelock(info, self.join(path))
361
361
362 def mkdir(self, path=None):
362 def mkdir(self, path=None):
363 return os.mkdir(self.join(path))
363 return os.mkdir(self.join(path))
364
364
365 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
365 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
366 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
366 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
367 dir=self.join(dir), text=text)
367 dir=self.join(dir), text=text)
368 dname, fname = util.split(name)
368 dname, fname = util.split(name)
369 if dir:
369 if dir:
370 return fd, os.path.join(dir, fname)
370 return fd, os.path.join(dir, fname)
371 else:
371 else:
372 return fd, fname
372 return fd, fname
373
373
374 def readdir(self, path=None, stat=None, skip=None):
374 def readdir(self, path=None, stat=None, skip=None):
375 return osutil.listdir(self.join(path), stat, skip)
375 return osutil.listdir(self.join(path), stat, skip)
376
376
377 def readlock(self, path):
377 def readlock(self, path):
378 return util.readlock(self.join(path))
378 return util.readlock(self.join(path))
379
379
380 def rename(self, src, dst):
380 def rename(self, src, dst):
381 return util.rename(self.join(src), self.join(dst))
381 return util.rename(self.join(src), self.join(dst))
382
382
383 def readlink(self, path):
383 def readlink(self, path):
384 return os.readlink(self.join(path))
384 return os.readlink(self.join(path))
385
385
386 def removedirs(self, path=None):
386 def removedirs(self, path=None):
387 """Remove a leaf directory and all empty intermediate ones
387 """Remove a leaf directory and all empty intermediate ones
388 """
388 """
389 return util.removedirs(self.join(path))
389 return util.removedirs(self.join(path))
390
390
391 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
391 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
392 """Remove a directory tree recursively
392 """Remove a directory tree recursively
393
393
394 If ``forcibly``, this tries to remove READ-ONLY files, too.
394 If ``forcibly``, this tries to remove READ-ONLY files, too.
395 """
395 """
396 if forcibly:
396 if forcibly:
397 def onerror(function, path, excinfo):
397 def onerror(function, path, excinfo):
398 if function is not os.remove:
398 if function is not os.remove:
399 raise
399 raise
400 # read-only files cannot be unlinked under Windows
400 # read-only files cannot be unlinked under Windows
401 s = os.stat(path)
401 s = os.stat(path)
402 if (s.st_mode & stat.S_IWRITE) != 0:
402 if (s.st_mode & stat.S_IWRITE) != 0:
403 raise
403 raise
404 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
404 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
405 os.remove(path)
405 os.remove(path)
406 else:
406 else:
407 onerror = None
407 onerror = None
408 return shutil.rmtree(self.join(path),
408 return shutil.rmtree(self.join(path),
409 ignore_errors=ignore_errors, onerror=onerror)
409 ignore_errors=ignore_errors, onerror=onerror)
410
410
411 def setflags(self, path, l, x):
411 def setflags(self, path, l, x):
412 return util.setflags(self.join(path), l, x)
412 return util.setflags(self.join(path), l, x)
413
413
414 def stat(self, path=None):
414 def stat(self, path=None):
415 return os.stat(self.join(path))
415 return os.stat(self.join(path))
416
416
417 def unlink(self, path=None):
417 def unlink(self, path=None):
418 return util.unlink(self.join(path))
418 return util.unlink(self.join(path))
419
419
420 def unlinkpath(self, path=None, ignoremissing=False):
420 def unlinkpath(self, path=None, ignoremissing=False):
421 return util.unlinkpath(self.join(path), ignoremissing)
421 return util.unlinkpath(self.join(path), ignoremissing)
422
422
423 def utime(self, path=None, t=None):
423 def utime(self, path=None, t=None):
424 return os.utime(self.join(path), t)
424 return os.utime(self.join(path), t)
425
425
426 def walk(self, path=None, onerror=None):
426 def walk(self, path=None, onerror=None):
427 """Yield (dirpath, dirs, files) tuple for each directories under path
427 """Yield (dirpath, dirs, files) tuple for each directories under path
428
428
429 ``dirpath`` is relative one from the root of this vfs. This
429 ``dirpath`` is relative one from the root of this vfs. This
430 uses ``os.sep`` as path separator, even you specify POSIX
430 uses ``os.sep`` as path separator, even you specify POSIX
431 style ``path``.
431 style ``path``.
432
432
433 "The root of this vfs" is represented as empty ``dirpath``.
433 "The root of this vfs" is represented as empty ``dirpath``.
434 """
434 """
435 root = os.path.normpath(self.join(None))
435 root = os.path.normpath(self.join(None))
436 # when dirpath == root, dirpath[prefixlen:] becomes empty
436 # when dirpath == root, dirpath[prefixlen:] becomes empty
437 # because len(dirpath) < prefixlen.
437 # because len(dirpath) < prefixlen.
438 prefixlen = len(pathutil.normasprefix(root))
438 prefixlen = len(pathutil.normasprefix(root))
439 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
439 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
440 yield (dirpath[prefixlen:], dirs, files)
440 yield (dirpath[prefixlen:], dirs, files)
441
441
442 @contextlib.contextmanager
442 @contextlib.contextmanager
443 def backgroundclosing(self, ui, expectedcount=-1):
443 def backgroundclosing(self, ui, expectedcount=-1):
444 """Allow files to be closed asynchronously.
444 """Allow files to be closed asynchronously.
445
445
446 When this context manager is active, ``backgroundclose`` can be passed
446 When this context manager is active, ``backgroundclose`` can be passed
447 to ``__call__``/``open`` to result in the file possibly being closed
447 to ``__call__``/``open`` to result in the file possibly being closed
448 asynchronously, on a background thread.
448 asynchronously, on a background thread.
449 """
449 """
450 # This is an arbitrary restriction and could be changed if we ever
450 # This is an arbitrary restriction and could be changed if we ever
451 # have a use case.
451 # have a use case.
452 vfs = getattr(self, 'vfs', self)
452 vfs = getattr(self, 'vfs', self)
453 if getattr(vfs, '_backgroundfilecloser', None):
453 if getattr(vfs, '_backgroundfilecloser', None):
454 raise error.Abort('can only have 1 active background file closer')
454 raise error.Abort('can only have 1 active background file closer')
455
455
456 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
456 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
457 try:
457 try:
458 vfs._backgroundfilecloser = bfc
458 vfs._backgroundfilecloser = bfc
459 yield bfc
459 yield bfc
460 finally:
460 finally:
461 vfs._backgroundfilecloser = None
461 vfs._backgroundfilecloser = None
462
462
463 class vfs(abstractvfs):
463 class vfs(abstractvfs):
464 '''Operate files relative to a base directory
464 '''Operate files relative to a base directory
465
465
466 This class is used to hide the details of COW semantics and
466 This class is used to hide the details of COW semantics and
467 remote file access from higher level code.
467 remote file access from higher level code.
468 '''
468 '''
469 def __init__(self, base, audit=True, expandpath=False, realpath=False):
469 def __init__(self, base, audit=True, expandpath=False, realpath=False):
470 if expandpath:
470 if expandpath:
471 base = util.expandpath(base)
471 base = util.expandpath(base)
472 if realpath:
472 if realpath:
473 base = os.path.realpath(base)
473 base = os.path.realpath(base)
474 self.base = base
474 self.base = base
475 self.mustaudit = audit
475 self.mustaudit = audit
476 self.createmode = None
476 self.createmode = None
477 self._trustnlink = None
477 self._trustnlink = None
478
478
479 @property
479 @property
480 def mustaudit(self):
480 def mustaudit(self):
481 return self._audit
481 return self._audit
482
482
483 @mustaudit.setter
483 @mustaudit.setter
484 def mustaudit(self, onoff):
484 def mustaudit(self, onoff):
485 self._audit = onoff
485 self._audit = onoff
486 if onoff:
486 if onoff:
487 self.audit = pathutil.pathauditor(self.base)
487 self.audit = pathutil.pathauditor(self.base)
488 else:
488 else:
489 self.audit = util.always
489 self.audit = util.always
490
490
491 @util.propertycache
491 @util.propertycache
492 def _cansymlink(self):
492 def _cansymlink(self):
493 return util.checklink(self.base)
493 return util.checklink(self.base)
494
494
495 @util.propertycache
495 @util.propertycache
496 def _chmod(self):
496 def _chmod(self):
497 return util.checkexec(self.base)
497 return util.checkexec(self.base)
498
498
499 def _fixfilemode(self, name):
499 def _fixfilemode(self, name):
500 if self.createmode is None or not self._chmod:
500 if self.createmode is None or not self._chmod:
501 return
501 return
502 os.chmod(name, self.createmode & 0o666)
502 os.chmod(name, self.createmode & 0o666)
503
503
504 def __call__(self, path, mode="r", text=False, atomictemp=False,
504 def __call__(self, path, mode="r", text=False, atomictemp=False,
505 notindexed=False, backgroundclose=False):
505 notindexed=False, backgroundclose=False, checkambig=False):
506 '''Open ``path`` file, which is relative to vfs root.
506 '''Open ``path`` file, which is relative to vfs root.
507
507
508 Newly created directories are marked as "not to be indexed by
508 Newly created directories are marked as "not to be indexed by
509 the content indexing service", if ``notindexed`` is specified
509 the content indexing service", if ``notindexed`` is specified
510 for "write" mode access.
510 for "write" mode access.
511
511
512 If ``backgroundclose`` is passed, the file may be closed asynchronously.
512 If ``backgroundclose`` is passed, the file may be closed asynchronously.
513 It can only be used if the ``self.backgroundclosing()`` context manager
513 It can only be used if the ``self.backgroundclosing()`` context manager
514 is active. This should only be specified if the following criteria hold:
514 is active. This should only be specified if the following criteria hold:
515
515
516 1. There is a potential for writing thousands of files. Unless you
516 1. There is a potential for writing thousands of files. Unless you
517 are writing thousands of files, the performance benefits of
517 are writing thousands of files, the performance benefits of
518 asynchronously closing files is not realized.
518 asynchronously closing files is not realized.
519 2. Files are opened exactly once for the ``backgroundclosing``
519 2. Files are opened exactly once for the ``backgroundclosing``
520 active duration and are therefore free of race conditions between
520 active duration and are therefore free of race conditions between
521 closing a file on a background thread and reopening it. (If the
521 closing a file on a background thread and reopening it. (If the
522 file were opened multiple times, there could be unflushed data
522 file were opened multiple times, there could be unflushed data
523 because the original file handle hasn't been flushed/closed yet.)
523 because the original file handle hasn't been flushed/closed yet.)
524
525 ``checkambig`` is passed to atomictempfile (valid only for writing).
524 '''
526 '''
525 if self._audit:
527 if self._audit:
526 r = util.checkosfilename(path)
528 r = util.checkosfilename(path)
527 if r:
529 if r:
528 raise error.Abort("%s: %r" % (r, path))
530 raise error.Abort("%s: %r" % (r, path))
529 self.audit(path)
531 self.audit(path)
530 f = self.join(path)
532 f = self.join(path)
531
533
532 if not text and "b" not in mode:
534 if not text and "b" not in mode:
533 mode += "b" # for that other OS
535 mode += "b" # for that other OS
534
536
535 nlink = -1
537 nlink = -1
536 if mode not in ('r', 'rb'):
538 if mode not in ('r', 'rb'):
537 dirname, basename = util.split(f)
539 dirname, basename = util.split(f)
538 # If basename is empty, then the path is malformed because it points
540 # If basename is empty, then the path is malformed because it points
539 # to a directory. Let the posixfile() call below raise IOError.
541 # to a directory. Let the posixfile() call below raise IOError.
540 if basename:
542 if basename:
541 if atomictemp:
543 if atomictemp:
542 util.makedirs(dirname, self.createmode, notindexed)
544 util.makedirs(dirname, self.createmode, notindexed)
543 return util.atomictempfile(f, mode, self.createmode)
545 return util.atomictempfile(f, mode, self.createmode,
546 checkambig=checkambig)
544 try:
547 try:
545 if 'w' in mode:
548 if 'w' in mode:
546 util.unlink(f)
549 util.unlink(f)
547 nlink = 0
550 nlink = 0
548 else:
551 else:
549 # nlinks() may behave differently for files on Windows
552 # nlinks() may behave differently for files on Windows
550 # shares if the file is open.
553 # shares if the file is open.
551 with util.posixfile(f):
554 with util.posixfile(f):
552 nlink = util.nlinks(f)
555 nlink = util.nlinks(f)
553 if nlink < 1:
556 if nlink < 1:
554 nlink = 2 # force mktempcopy (issue1922)
557 nlink = 2 # force mktempcopy (issue1922)
555 except (OSError, IOError) as e:
558 except (OSError, IOError) as e:
556 if e.errno != errno.ENOENT:
559 if e.errno != errno.ENOENT:
557 raise
560 raise
558 nlink = 0
561 nlink = 0
559 util.makedirs(dirname, self.createmode, notindexed)
562 util.makedirs(dirname, self.createmode, notindexed)
560 if nlink > 0:
563 if nlink > 0:
561 if self._trustnlink is None:
564 if self._trustnlink is None:
562 self._trustnlink = nlink > 1 or util.checknlink(f)
565 self._trustnlink = nlink > 1 or util.checknlink(f)
563 if nlink > 1 or not self._trustnlink:
566 if nlink > 1 or not self._trustnlink:
564 util.rename(util.mktempcopy(f), f)
567 util.rename(util.mktempcopy(f), f)
565 fp = util.posixfile(f, mode)
568 fp = util.posixfile(f, mode)
566 if nlink == 0:
569 if nlink == 0:
567 self._fixfilemode(f)
570 self._fixfilemode(f)
568
571
569 if backgroundclose:
572 if backgroundclose:
570 if not self._backgroundfilecloser:
573 if not self._backgroundfilecloser:
571 raise error.Abort('backgroundclose can only be used when a '
574 raise error.Abort('backgroundclose can only be used when a '
572 'backgroundclosing context manager is active')
575 'backgroundclosing context manager is active')
573
576
574 fp = delayclosedfile(fp, self._backgroundfilecloser)
577 fp = delayclosedfile(fp, self._backgroundfilecloser)
575
578
576 return fp
579 return fp
577
580
578 def symlink(self, src, dst):
581 def symlink(self, src, dst):
579 self.audit(dst)
582 self.audit(dst)
580 linkname = self.join(dst)
583 linkname = self.join(dst)
581 try:
584 try:
582 os.unlink(linkname)
585 os.unlink(linkname)
583 except OSError:
586 except OSError:
584 pass
587 pass
585
588
586 util.makedirs(os.path.dirname(linkname), self.createmode)
589 util.makedirs(os.path.dirname(linkname), self.createmode)
587
590
588 if self._cansymlink:
591 if self._cansymlink:
589 try:
592 try:
590 os.symlink(src, linkname)
593 os.symlink(src, linkname)
591 except OSError as err:
594 except OSError as err:
592 raise OSError(err.errno, _('could not symlink to %r: %s') %
595 raise OSError(err.errno, _('could not symlink to %r: %s') %
593 (src, err.strerror), linkname)
596 (src, err.strerror), linkname)
594 else:
597 else:
595 self.write(dst, src)
598 self.write(dst, src)
596
599
597 def join(self, path, *insidef):
600 def join(self, path, *insidef):
598 if path:
601 if path:
599 return os.path.join(self.base, path, *insidef)
602 return os.path.join(self.base, path, *insidef)
600 else:
603 else:
601 return self.base
604 return self.base
602
605
603 opener = vfs
606 opener = vfs
604
607
605 class auditvfs(object):
608 class auditvfs(object):
606 def __init__(self, vfs):
609 def __init__(self, vfs):
607 self.vfs = vfs
610 self.vfs = vfs
608
611
609 @property
612 @property
610 def mustaudit(self):
613 def mustaudit(self):
611 return self.vfs.mustaudit
614 return self.vfs.mustaudit
612
615
613 @mustaudit.setter
616 @mustaudit.setter
614 def mustaudit(self, onoff):
617 def mustaudit(self, onoff):
615 self.vfs.mustaudit = onoff
618 self.vfs.mustaudit = onoff
616
619
617 class filtervfs(abstractvfs, auditvfs):
620 class filtervfs(abstractvfs, auditvfs):
618 '''Wrapper vfs for filtering filenames with a function.'''
621 '''Wrapper vfs for filtering filenames with a function.'''
619
622
620 def __init__(self, vfs, filter):
623 def __init__(self, vfs, filter):
621 auditvfs.__init__(self, vfs)
624 auditvfs.__init__(self, vfs)
622 self._filter = filter
625 self._filter = filter
623
626
624 def __call__(self, path, *args, **kwargs):
627 def __call__(self, path, *args, **kwargs):
625 return self.vfs(self._filter(path), *args, **kwargs)
628 return self.vfs(self._filter(path), *args, **kwargs)
626
629
627 def join(self, path, *insidef):
630 def join(self, path, *insidef):
628 if path:
631 if path:
629 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
632 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
630 else:
633 else:
631 return self.vfs.join(path)
634 return self.vfs.join(path)
632
635
633 filteropener = filtervfs
636 filteropener = filtervfs
634
637
635 class readonlyvfs(abstractvfs, auditvfs):
638 class readonlyvfs(abstractvfs, auditvfs):
636 '''Wrapper vfs preventing any writing.'''
639 '''Wrapper vfs preventing any writing.'''
637
640
638 def __init__(self, vfs):
641 def __init__(self, vfs):
639 auditvfs.__init__(self, vfs)
642 auditvfs.__init__(self, vfs)
640
643
641 def __call__(self, path, mode='r', *args, **kw):
644 def __call__(self, path, mode='r', *args, **kw):
642 if mode not in ('r', 'rb'):
645 if mode not in ('r', 'rb'):
643 raise error.Abort('this vfs is read only')
646 raise error.Abort('this vfs is read only')
644 return self.vfs(path, mode, *args, **kw)
647 return self.vfs(path, mode, *args, **kw)
645
648
646 def join(self, path, *insidef):
649 def join(self, path, *insidef):
647 return self.vfs.join(path, *insidef)
650 return self.vfs.join(path, *insidef)
648
651
649 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
652 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
650 '''yield every hg repository under path, always recursively.
653 '''yield every hg repository under path, always recursively.
651 The recurse flag will only control recursion into repo working dirs'''
654 The recurse flag will only control recursion into repo working dirs'''
652 def errhandler(err):
655 def errhandler(err):
653 if err.filename == path:
656 if err.filename == path:
654 raise err
657 raise err
655 samestat = getattr(os.path, 'samestat', None)
658 samestat = getattr(os.path, 'samestat', None)
656 if followsym and samestat is not None:
659 if followsym and samestat is not None:
657 def adddir(dirlst, dirname):
660 def adddir(dirlst, dirname):
658 match = False
661 match = False
659 dirstat = os.stat(dirname)
662 dirstat = os.stat(dirname)
660 for lstdirstat in dirlst:
663 for lstdirstat in dirlst:
661 if samestat(dirstat, lstdirstat):
664 if samestat(dirstat, lstdirstat):
662 match = True
665 match = True
663 break
666 break
664 if not match:
667 if not match:
665 dirlst.append(dirstat)
668 dirlst.append(dirstat)
666 return not match
669 return not match
667 else:
670 else:
668 followsym = False
671 followsym = False
669
672
670 if (seen_dirs is None) and followsym:
673 if (seen_dirs is None) and followsym:
671 seen_dirs = []
674 seen_dirs = []
672 adddir(seen_dirs, path)
675 adddir(seen_dirs, path)
673 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
676 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
674 dirs.sort()
677 dirs.sort()
675 if '.hg' in dirs:
678 if '.hg' in dirs:
676 yield root # found a repository
679 yield root # found a repository
677 qroot = os.path.join(root, '.hg', 'patches')
680 qroot = os.path.join(root, '.hg', 'patches')
678 if os.path.isdir(os.path.join(qroot, '.hg')):
681 if os.path.isdir(os.path.join(qroot, '.hg')):
679 yield qroot # we have a patch queue repo here
682 yield qroot # we have a patch queue repo here
680 if recurse:
683 if recurse:
681 # avoid recursing inside the .hg directory
684 # avoid recursing inside the .hg directory
682 dirs.remove('.hg')
685 dirs.remove('.hg')
683 else:
686 else:
684 dirs[:] = [] # don't descend further
687 dirs[:] = [] # don't descend further
685 elif followsym:
688 elif followsym:
686 newdirs = []
689 newdirs = []
687 for d in dirs:
690 for d in dirs:
688 fname = os.path.join(root, d)
691 fname = os.path.join(root, d)
689 if adddir(seen_dirs, fname):
692 if adddir(seen_dirs, fname):
690 if os.path.islink(fname):
693 if os.path.islink(fname):
691 for hgname in walkrepos(fname, True, seen_dirs):
694 for hgname in walkrepos(fname, True, seen_dirs):
692 yield hgname
695 yield hgname
693 else:
696 else:
694 newdirs.append(d)
697 newdirs.append(d)
695 dirs[:] = newdirs
698 dirs[:] = newdirs
696
699
697 def osrcpath():
700 def osrcpath():
698 '''return default os-specific hgrc search path'''
701 '''return default os-specific hgrc search path'''
699 path = []
702 path = []
700 defaultpath = os.path.join(util.datapath, 'default.d')
703 defaultpath = os.path.join(util.datapath, 'default.d')
701 if os.path.isdir(defaultpath):
704 if os.path.isdir(defaultpath):
702 for f, kind in osutil.listdir(defaultpath):
705 for f, kind in osutil.listdir(defaultpath):
703 if f.endswith('.rc'):
706 if f.endswith('.rc'):
704 path.append(os.path.join(defaultpath, f))
707 path.append(os.path.join(defaultpath, f))
705 path.extend(systemrcpath())
708 path.extend(systemrcpath())
706 path.extend(userrcpath())
709 path.extend(userrcpath())
707 path = [os.path.normpath(f) for f in path]
710 path = [os.path.normpath(f) for f in path]
708 return path
711 return path
709
712
710 _rcpath = None
713 _rcpath = None
711
714
712 def rcpath():
715 def rcpath():
713 '''return hgrc search path. if env var HGRCPATH is set, use it.
716 '''return hgrc search path. if env var HGRCPATH is set, use it.
714 for each item in path, if directory, use files ending in .rc,
717 for each item in path, if directory, use files ending in .rc,
715 else use item.
718 else use item.
716 make HGRCPATH empty to only look in .hg/hgrc of current repo.
719 make HGRCPATH empty to only look in .hg/hgrc of current repo.
717 if no HGRCPATH, use default os-specific path.'''
720 if no HGRCPATH, use default os-specific path.'''
718 global _rcpath
721 global _rcpath
719 if _rcpath is None:
722 if _rcpath is None:
720 if 'HGRCPATH' in os.environ:
723 if 'HGRCPATH' in os.environ:
721 _rcpath = []
724 _rcpath = []
722 for p in os.environ['HGRCPATH'].split(os.pathsep):
725 for p in os.environ['HGRCPATH'].split(os.pathsep):
723 if not p:
726 if not p:
724 continue
727 continue
725 p = util.expandpath(p)
728 p = util.expandpath(p)
726 if os.path.isdir(p):
729 if os.path.isdir(p):
727 for f, kind in osutil.listdir(p):
730 for f, kind in osutil.listdir(p):
728 if f.endswith('.rc'):
731 if f.endswith('.rc'):
729 _rcpath.append(os.path.join(p, f))
732 _rcpath.append(os.path.join(p, f))
730 else:
733 else:
731 _rcpath.append(p)
734 _rcpath.append(p)
732 else:
735 else:
733 _rcpath = osrcpath()
736 _rcpath = osrcpath()
734 return _rcpath
737 return _rcpath
735
738
736 def intrev(rev):
739 def intrev(rev):
737 """Return integer for a given revision that can be used in comparison or
740 """Return integer for a given revision that can be used in comparison or
738 arithmetic operation"""
741 arithmetic operation"""
739 if rev is None:
742 if rev is None:
740 return wdirrev
743 return wdirrev
741 return rev
744 return rev
742
745
743 def revsingle(repo, revspec, default='.'):
746 def revsingle(repo, revspec, default='.'):
744 if not revspec and revspec != 0:
747 if not revspec and revspec != 0:
745 return repo[default]
748 return repo[default]
746
749
747 l = revrange(repo, [revspec])
750 l = revrange(repo, [revspec])
748 if not l:
751 if not l:
749 raise error.Abort(_('empty revision set'))
752 raise error.Abort(_('empty revision set'))
750 return repo[l.last()]
753 return repo[l.last()]
751
754
752 def _pairspec(revspec):
755 def _pairspec(revspec):
753 tree = revset.parse(revspec)
756 tree = revset.parse(revspec)
754 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
757 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
755 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
758 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
756
759
757 def revpair(repo, revs):
760 def revpair(repo, revs):
758 if not revs:
761 if not revs:
759 return repo.dirstate.p1(), None
762 return repo.dirstate.p1(), None
760
763
761 l = revrange(repo, revs)
764 l = revrange(repo, revs)
762
765
763 if not l:
766 if not l:
764 first = second = None
767 first = second = None
765 elif l.isascending():
768 elif l.isascending():
766 first = l.min()
769 first = l.min()
767 second = l.max()
770 second = l.max()
768 elif l.isdescending():
771 elif l.isdescending():
769 first = l.max()
772 first = l.max()
770 second = l.min()
773 second = l.min()
771 else:
774 else:
772 first = l.first()
775 first = l.first()
773 second = l.last()
776 second = l.last()
774
777
775 if first is None:
778 if first is None:
776 raise error.Abort(_('empty revision range'))
779 raise error.Abort(_('empty revision range'))
777 if (first == second and len(revs) >= 2
780 if (first == second and len(revs) >= 2
778 and not all(revrange(repo, [r]) for r in revs)):
781 and not all(revrange(repo, [r]) for r in revs)):
779 raise error.Abort(_('empty revision on one side of range'))
782 raise error.Abort(_('empty revision on one side of range'))
780
783
781 # if top-level is range expression, the result must always be a pair
784 # if top-level is range expression, the result must always be a pair
782 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
785 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
783 return repo.lookup(first), None
786 return repo.lookup(first), None
784
787
785 return repo.lookup(first), repo.lookup(second)
788 return repo.lookup(first), repo.lookup(second)
786
789
787 def revrange(repo, revs):
790 def revrange(repo, revs):
788 """Yield revision as strings from a list of revision specifications."""
791 """Yield revision as strings from a list of revision specifications."""
789 allspecs = []
792 allspecs = []
790 for spec in revs:
793 for spec in revs:
791 if isinstance(spec, int):
794 if isinstance(spec, int):
792 spec = revset.formatspec('rev(%d)', spec)
795 spec = revset.formatspec('rev(%d)', spec)
793 allspecs.append(spec)
796 allspecs.append(spec)
794 m = revset.matchany(repo.ui, allspecs, repo)
797 m = revset.matchany(repo.ui, allspecs, repo)
795 return m(repo)
798 return m(repo)
796
799
797 def meaningfulparents(repo, ctx):
800 def meaningfulparents(repo, ctx):
798 """Return list of meaningful (or all if debug) parentrevs for rev.
801 """Return list of meaningful (or all if debug) parentrevs for rev.
799
802
800 For merges (two non-nullrev revisions) both parents are meaningful.
803 For merges (two non-nullrev revisions) both parents are meaningful.
801 Otherwise the first parent revision is considered meaningful if it
804 Otherwise the first parent revision is considered meaningful if it
802 is not the preceding revision.
805 is not the preceding revision.
803 """
806 """
804 parents = ctx.parents()
807 parents = ctx.parents()
805 if len(parents) > 1:
808 if len(parents) > 1:
806 return parents
809 return parents
807 if repo.ui.debugflag:
810 if repo.ui.debugflag:
808 return [parents[0], repo['null']]
811 return [parents[0], repo['null']]
809 if parents[0].rev() >= intrev(ctx.rev()) - 1:
812 if parents[0].rev() >= intrev(ctx.rev()) - 1:
810 return []
813 return []
811 return parents
814 return parents
812
815
813 def expandpats(pats):
816 def expandpats(pats):
814 '''Expand bare globs when running on windows.
817 '''Expand bare globs when running on windows.
815 On posix we assume it already has already been done by sh.'''
818 On posix we assume it already has already been done by sh.'''
816 if not util.expandglobs:
819 if not util.expandglobs:
817 return list(pats)
820 return list(pats)
818 ret = []
821 ret = []
819 for kindpat in pats:
822 for kindpat in pats:
820 kind, pat = matchmod._patsplit(kindpat, None)
823 kind, pat = matchmod._patsplit(kindpat, None)
821 if kind is None:
824 if kind is None:
822 try:
825 try:
823 globbed = glob.glob(pat)
826 globbed = glob.glob(pat)
824 except re.error:
827 except re.error:
825 globbed = [pat]
828 globbed = [pat]
826 if globbed:
829 if globbed:
827 ret.extend(globbed)
830 ret.extend(globbed)
828 continue
831 continue
829 ret.append(kindpat)
832 ret.append(kindpat)
830 return ret
833 return ret
831
834
832 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
835 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
833 badfn=None):
836 badfn=None):
834 '''Return a matcher and the patterns that were used.
837 '''Return a matcher and the patterns that were used.
835 The matcher will warn about bad matches, unless an alternate badfn callback
838 The matcher will warn about bad matches, unless an alternate badfn callback
836 is provided.'''
839 is provided.'''
837 if pats == ("",):
840 if pats == ("",):
838 pats = []
841 pats = []
839 if opts is None:
842 if opts is None:
840 opts = {}
843 opts = {}
841 if not globbed and default == 'relpath':
844 if not globbed and default == 'relpath':
842 pats = expandpats(pats or [])
845 pats = expandpats(pats or [])
843
846
844 def bad(f, msg):
847 def bad(f, msg):
845 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
848 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
846
849
847 if badfn is None:
850 if badfn is None:
848 badfn = bad
851 badfn = bad
849
852
850 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
853 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
851 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
854 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
852
855
853 if m.always():
856 if m.always():
854 pats = []
857 pats = []
855 return m, pats
858 return m, pats
856
859
857 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
860 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
858 badfn=None):
861 badfn=None):
859 '''Return a matcher that will warn about bad matches.'''
862 '''Return a matcher that will warn about bad matches.'''
860 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
863 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
861
864
862 def matchall(repo):
865 def matchall(repo):
863 '''Return a matcher that will efficiently match everything.'''
866 '''Return a matcher that will efficiently match everything.'''
864 return matchmod.always(repo.root, repo.getcwd())
867 return matchmod.always(repo.root, repo.getcwd())
865
868
866 def matchfiles(repo, files, badfn=None):
869 def matchfiles(repo, files, badfn=None):
867 '''Return a matcher that will efficiently match exactly these files.'''
870 '''Return a matcher that will efficiently match exactly these files.'''
868 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
871 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
869
872
870 def origpath(ui, repo, filepath):
873 def origpath(ui, repo, filepath):
871 '''customize where .orig files are created
874 '''customize where .orig files are created
872
875
873 Fetch user defined path from config file: [ui] origbackuppath = <path>
876 Fetch user defined path from config file: [ui] origbackuppath = <path>
874 Fall back to default (filepath) if not specified
877 Fall back to default (filepath) if not specified
875 '''
878 '''
876 origbackuppath = ui.config('ui', 'origbackuppath', None)
879 origbackuppath = ui.config('ui', 'origbackuppath', None)
877 if origbackuppath is None:
880 if origbackuppath is None:
878 return filepath + ".orig"
881 return filepath + ".orig"
879
882
880 filepathfromroot = os.path.relpath(filepath, start=repo.root)
883 filepathfromroot = os.path.relpath(filepath, start=repo.root)
881 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
884 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
882
885
883 origbackupdir = repo.vfs.dirname(fullorigpath)
886 origbackupdir = repo.vfs.dirname(fullorigpath)
884 if not repo.vfs.exists(origbackupdir):
887 if not repo.vfs.exists(origbackupdir):
885 ui.note(_('creating directory: %s\n') % origbackupdir)
888 ui.note(_('creating directory: %s\n') % origbackupdir)
886 util.makedirs(origbackupdir)
889 util.makedirs(origbackupdir)
887
890
888 return fullorigpath + ".orig"
891 return fullorigpath + ".orig"
889
892
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

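# A sketch of a typical call site (assumed, mirroring what 'hg addremove'
# does; the exact plumbing lives in commands.py): build a matcher for the
# requested patterns, then let addremove() reconcile the dirstate:
#     m = scmutil.match(repo[None], pats, opts)
#     scmutil.addremove(repo, m, '', opts, similarity=0.5)
# Note that similarity here is a ratio in [0, 1]; the command line's -s
# option takes a percentage and divides by 100 before calling in.
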
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

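# Hypothetical caller sketch: patch application knows which files it wrote
# and, once they are on disk, reports them so that adds, removes and renames
# are reflected in the dirstate:
#     scmutil.marktouched(repo, changedfiles, similarity)
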
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

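# For reference, the one-letter dirstate states tested above are 'n' (normal),
# 'a' (added), 'r' (removed), 'm' (merged) and '?' (untracked), while a falsy
# stat result means the file no longer exists in the working directory.
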
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

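# Worked example (hypothetical files): with similarity=0.75, a removed a.txt
# that shares at least 75% of its content with a newly added b.txt produces
#     recording removal of a.txt as rename to b.txt (80% similar)
# and renames == {'b.txt': 'a.txt'}; scores here run from 0.0 to 1.0.
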
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons (e.g. the source has not been committed yet), dst might
    not end up marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

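# Sketch of the "copying back a copy" branch above: if a was copied to b and
# b is then copied back onto a, a is not re-marked as a copy of b; its
# dirstate entry is merely refreshed via normallookup().
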
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements

def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

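# For illustration, a typical .hg/requires of this era contains one feature
# name per line, e.g.:
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
# readrequires() refuses to operate on a repository whose file lists a
# feature this Mercurial build does not support.
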
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell us
    if a file has been replaced. If it can't, we fall back to recreating the
    object on every call (essentially the same behavior as propertycache).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified the file between the time we read and stat it
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

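# A minimal usage sketch (assumed; localrepo defines the real variant): a
# subclass overrides join() so names resolve under .hg, then decorates a
# property whose backing file should be watched:
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.join(fname)
#
#     class somerepo(object):
#         @repofilecache('bookmarks')
#         def _bookmarks(self):
#             return readbookmarks(self)  # hypothetical loader
#
# The property is recomputed only when .hg/bookmarks is observed to change.
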
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

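# Illustrative flow (assumed): the spawned command sees HG_WLOCK_LOCKER in
# its environment, so a child hg process that needs the wlock can inherit
# the parent's lock instead of deadlocking while waiting for it:
#     ret = wlocksub(repo, cmd)  # cmd is any shell command; ret is its
#                                # exit code
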
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))

def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)

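# Example hgrc snippet exercising these knobs (values hypothetical):
#     [format]
#     generaldelta = True
#     usegeneraldelta = False
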
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)

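# Design note: the proxy forwards every attribute access to the wrapped file
# object but reroutes close() and __exit__ to the closer, so callers treat
# the handle as an ordinary file while the real close() happens later on a
# background thread (see backgroundfilecloser below).
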
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow it to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount
        # closes. (We don't currently have any callers that don't know their
        # file count.)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than the lifetime of the context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch here, or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so we can re-raise from the main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort('can only call close() when context manager '
                              'active')

        # If a background thread encountered an exception, raise now so we
        # fail fast. Otherwise we may potentially go on for minutes until the
        # error is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)

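# A minimal usage sketch (assumed; the vfs layer drives this in practice):
#     with backgroundfilecloser(ui, expectedcount=len(files)) as closer:
#         for name, data in files.items():
#             fh = open(name, 'wb')
#             fh.write(data)
#             closer.close(fh)  # queued; closed by a worker thread
# Exiting the context manager joins the workers, so no handle outlives it.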