vfs: make rename avoid ambiguity of file stat if needed...
FUJIWARA Katsunori
r29203:731ced08 default
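
This changeset teaches abstractvfs.rename() to avoid "stat ambiguity": several Mercurial caches treat a file as unchanged when its size and mtime match a previously recorded os.stat() result, so renaming a same-sized replacement over a file within the same second leaves those checks blind to the change. The standalone sketch below (hypothetical names, not part of the diff) reproduces the ambiguity that the new checkambig flag guards against.

    import os
    import shutil
    import tempfile

    # Hypothetical demonstration (not from the changeset itself) of the stat
    # ambiguity being fixed: a cache keyed only on (size, mtime) cannot see a
    # same-sized file renamed over the original within the same second.
    def statkey(path):
        st = os.stat(path)
        return (st.st_size, int(st.st_mtime))

    d = tempfile.mkdtemp()
    target = os.path.join(d, 'data')
    replacement = os.path.join(d, 'data.new')
    with open(target, 'wb') as f:
        f.write(b'aaaa')
    with open(replacement, 'wb') as f:
        f.write(b'bbbb')             # same size, different content

    before = statkey(target)
    os.rename(replacement, target)   # rename lands within the same second
    after = statkey(target)
    print(before == after)           # usually True: the change is invisible
                                     # to (size, mtime) validation
    shutil.rmtree(d)
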
@@ -1,1381 +1,1391 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import contextlib
import errno
import glob
import os
import re
import shutil
import stat
import tempfile
import threading

from .i18n import _
from .node import wdirrev
from . import (
    encoding,
    error,
    match as matchmod,
    osutil,
    pathutil,
    phases,
    revset,
    similar,
    util,
)

if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)

def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleanup.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = util.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        key = s.digest()
    return key

class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

-    def rename(self, src, dst):
-        return util.rename(self.join(src), self.join(dst))
+    def rename(self, src, dst, checkambig=False):
+        dstpath = self.join(dst)
+        oldstat = checkambig and util.filestat(dstpath)
+        if oldstat and oldstat.stat:
+            ret = util.rename(self.join(src), dstpath)
+            newstat = util.filestat(dstpath)
+            if newstat.isambig(oldstat):
+                # stat of renamed file is ambiguous to original one
+                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
+                os.utime(dstpath, (advanced, advanced))
+            return ret
+        return util.rename(self.join(src), dstpath)

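The guarded path only runs when checkambig is set and the destination already exists (oldstat.stat is truthy); otherwise rename() behaves exactly as before. After the rename, util.filestat.isambig() (added earlier in this series) compares the destination's new stat against the old one, and when the replacement would be undetectable the destination's mtime is advanced by one second, masked to stay a non-negative 31-bit value, presumably to fit the signed 32-bit timestamp fields Mercurial uses elsewhere (e.g. the dirstate format). A brief sketch of the wrap-around and an illustrative call site (the path names are examples, not from this changeset):

    # Wrap-around behavior of the one-second advance (assuming 32-bit slots):
    advanced = (0x7fffffff + 1) & 0x7fffffff
    assert advanced == 0   # wraps to zero instead of overflowing a signed int

    # Opting in from a caller; any file whose cached stat other readers rely
    # on is a candidate (illustrative path names only):
    # vfs.rename('cache/branch2.new', 'cache/branch2', checkambig=True)
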
383 def readlink(self, path):
393 def readlink(self, path):
384 return os.readlink(self.join(path))
394 return os.readlink(self.join(path))
385
395
386 def removedirs(self, path=None):
396 def removedirs(self, path=None):
387 """Remove a leaf directory and all empty intermediate ones
397 """Remove a leaf directory and all empty intermediate ones
388 """
398 """
389 return util.removedirs(self.join(path))
399 return util.removedirs(self.join(path))
390
400
391 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
401 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
392 """Remove a directory tree recursively
402 """Remove a directory tree recursively
393
403
394 If ``forcibly``, this tries to remove READ-ONLY files, too.
404 If ``forcibly``, this tries to remove READ-ONLY files, too.
395 """
405 """
396 if forcibly:
406 if forcibly:
397 def onerror(function, path, excinfo):
407 def onerror(function, path, excinfo):
398 if function is not os.remove:
408 if function is not os.remove:
399 raise
409 raise
400 # read-only files cannot be unlinked under Windows
410 # read-only files cannot be unlinked under Windows
401 s = os.stat(path)
411 s = os.stat(path)
402 if (s.st_mode & stat.S_IWRITE) != 0:
412 if (s.st_mode & stat.S_IWRITE) != 0:
403 raise
413 raise
404 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
414 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
405 os.remove(path)
415 os.remove(path)
406 else:
416 else:
407 onerror = None
417 onerror = None
408 return shutil.rmtree(self.join(path),
418 return shutil.rmtree(self.join(path),
409 ignore_errors=ignore_errors, onerror=onerror)
419 ignore_errors=ignore_errors, onerror=onerror)
410
420
411 def setflags(self, path, l, x):
421 def setflags(self, path, l, x):
412 return util.setflags(self.join(path), l, x)
422 return util.setflags(self.join(path), l, x)
413
423
414 def stat(self, path=None):
424 def stat(self, path=None):
415 return os.stat(self.join(path))
425 return os.stat(self.join(path))
416
426
417 def unlink(self, path=None):
427 def unlink(self, path=None):
418 return util.unlink(self.join(path))
428 return util.unlink(self.join(path))
419
429
420 def unlinkpath(self, path=None, ignoremissing=False):
430 def unlinkpath(self, path=None, ignoremissing=False):
421 return util.unlinkpath(self.join(path), ignoremissing)
431 return util.unlinkpath(self.join(path), ignoremissing)
422
432
423 def utime(self, path=None, t=None):
433 def utime(self, path=None, t=None):
424 return os.utime(self.join(path), t)
434 return os.utime(self.join(path), t)
425
435
426 def walk(self, path=None, onerror=None):
436 def walk(self, path=None, onerror=None):
427 """Yield (dirpath, dirs, files) tuple for each directories under path
437 """Yield (dirpath, dirs, files) tuple for each directories under path
428
438
429 ``dirpath`` is relative one from the root of this vfs. This
439 ``dirpath`` is relative one from the root of this vfs. This
430 uses ``os.sep`` as path separator, even you specify POSIX
440 uses ``os.sep`` as path separator, even you specify POSIX
431 style ``path``.
441 style ``path``.
432
442
433 "The root of this vfs" is represented as empty ``dirpath``.
443 "The root of this vfs" is represented as empty ``dirpath``.
434 """
444 """
435 root = os.path.normpath(self.join(None))
445 root = os.path.normpath(self.join(None))
436 # when dirpath == root, dirpath[prefixlen:] becomes empty
446 # when dirpath == root, dirpath[prefixlen:] becomes empty
437 # because len(dirpath) < prefixlen.
447 # because len(dirpath) < prefixlen.
438 prefixlen = len(pathutil.normasprefix(root))
448 prefixlen = len(pathutil.normasprefix(root))
439 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
449 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
440 yield (dirpath[prefixlen:], dirs, files)
450 yield (dirpath[prefixlen:], dirs, files)
441
451
442 @contextlib.contextmanager
452 @contextlib.contextmanager
443 def backgroundclosing(self, ui, expectedcount=-1):
453 def backgroundclosing(self, ui, expectedcount=-1):
444 """Allow files to be closed asynchronously.
454 """Allow files to be closed asynchronously.
445
455
446 When this context manager is active, ``backgroundclose`` can be passed
456 When this context manager is active, ``backgroundclose`` can be passed
447 to ``__call__``/``open`` to result in the file possibly being closed
457 to ``__call__``/``open`` to result in the file possibly being closed
448 asynchronously, on a background thread.
458 asynchronously, on a background thread.
449 """
459 """
450 # This is an arbitrary restriction and could be changed if we ever
460 # This is an arbitrary restriction and could be changed if we ever
451 # have a use case.
461 # have a use case.
452 vfs = getattr(self, 'vfs', self)
462 vfs = getattr(self, 'vfs', self)
453 if getattr(vfs, '_backgroundfilecloser', None):
463 if getattr(vfs, '_backgroundfilecloser', None):
454 raise error.Abort('can only have 1 active background file closer')
464 raise error.Abort('can only have 1 active background file closer')
455
465
456 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
466 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
457 try:
467 try:
458 vfs._backgroundfilecloser = bfc
468 vfs._backgroundfilecloser = bfc
459 yield bfc
469 yield bfc
460 finally:
470 finally:
461 vfs._backgroundfilecloser = None
471 vfs._backgroundfilecloser = None
462
472
463 class vfs(abstractvfs):
473 class vfs(abstractvfs):
464 '''Operate files relative to a base directory
474 '''Operate files relative to a base directory
465
475
466 This class is used to hide the details of COW semantics and
476 This class is used to hide the details of COW semantics and
467 remote file access from higher level code.
477 remote file access from higher level code.
468 '''
478 '''
469 def __init__(self, base, audit=True, expandpath=False, realpath=False):
479 def __init__(self, base, audit=True, expandpath=False, realpath=False):
470 if expandpath:
480 if expandpath:
471 base = util.expandpath(base)
481 base = util.expandpath(base)
472 if realpath:
482 if realpath:
473 base = os.path.realpath(base)
483 base = os.path.realpath(base)
474 self.base = base
484 self.base = base
475 self.mustaudit = audit
485 self.mustaudit = audit
476 self.createmode = None
486 self.createmode = None
477 self._trustnlink = None
487 self._trustnlink = None
478
488
479 @property
489 @property
480 def mustaudit(self):
490 def mustaudit(self):
481 return self._audit
491 return self._audit
482
492
483 @mustaudit.setter
493 @mustaudit.setter
484 def mustaudit(self, onoff):
494 def mustaudit(self, onoff):
485 self._audit = onoff
495 self._audit = onoff
486 if onoff:
496 if onoff:
487 self.audit = pathutil.pathauditor(self.base)
497 self.audit = pathutil.pathauditor(self.base)
488 else:
498 else:
489 self.audit = util.always
499 self.audit = util.always
490
500
491 @util.propertycache
501 @util.propertycache
492 def _cansymlink(self):
502 def _cansymlink(self):
493 return util.checklink(self.base)
503 return util.checklink(self.base)
494
504
495 @util.propertycache
505 @util.propertycache
496 def _chmod(self):
506 def _chmod(self):
497 return util.checkexec(self.base)
507 return util.checkexec(self.base)
498
508
499 def _fixfilemode(self, name):
509 def _fixfilemode(self, name):
500 if self.createmode is None or not self._chmod:
510 if self.createmode is None or not self._chmod:
501 return
511 return
502 os.chmod(name, self.createmode & 0o666)
512 os.chmod(name, self.createmode & 0o666)
503
513
504 def __call__(self, path, mode="r", text=False, atomictemp=False,
514 def __call__(self, path, mode="r", text=False, atomictemp=False,
505 notindexed=False, backgroundclose=False, checkambig=False):
515 notindexed=False, backgroundclose=False, checkambig=False):
506 '''Open ``path`` file, which is relative to vfs root.
516 '''Open ``path`` file, which is relative to vfs root.
507
517
508 Newly created directories are marked as "not to be indexed by
518 Newly created directories are marked as "not to be indexed by
509 the content indexing service", if ``notindexed`` is specified
519 the content indexing service", if ``notindexed`` is specified
510 for "write" mode access.
520 for "write" mode access.
511
521
512 If ``backgroundclose`` is passed, the file may be closed asynchronously.
522 If ``backgroundclose`` is passed, the file may be closed asynchronously.
513 It can only be used if the ``self.backgroundclosing()`` context manager
523 It can only be used if the ``self.backgroundclosing()`` context manager
514 is active. This should only be specified if the following criteria hold:
524 is active. This should only be specified if the following criteria hold:
515
525
516 1. There is a potential for writing thousands of files. Unless you
526 1. There is a potential for writing thousands of files. Unless you
517 are writing thousands of files, the performance benefits of
527 are writing thousands of files, the performance benefits of
518 asynchronously closing files is not realized.
528 asynchronously closing files is not realized.
519 2. Files are opened exactly once for the ``backgroundclosing``
529 2. Files are opened exactly once for the ``backgroundclosing``
520 active duration and are therefore free of race conditions between
530 active duration and are therefore free of race conditions between
521 closing a file on a background thread and reopening it. (If the
531 closing a file on a background thread and reopening it. (If the
522 file were opened multiple times, there could be unflushed data
532 file were opened multiple times, there could be unflushed data
523 because the original file handle hasn't been flushed/closed yet.)
533 because the original file handle hasn't been flushed/closed yet.)
524
534
525 ``checkambig`` is passed to atomictempfile (valid only for writing).
535 ``checkambig`` is passed to atomictempfile (valid only for writing).
526 '''
536 '''
527 if self._audit:
537 if self._audit:
528 r = util.checkosfilename(path)
538 r = util.checkosfilename(path)
529 if r:
539 if r:
530 raise error.Abort("%s: %r" % (r, path))
540 raise error.Abort("%s: %r" % (r, path))
531 self.audit(path)
541 self.audit(path)
532 f = self.join(path)
542 f = self.join(path)
533
543
534 if not text and "b" not in mode:
544 if not text and "b" not in mode:
535 mode += "b" # for that other OS
545 mode += "b" # for that other OS
536
546
537 nlink = -1
547 nlink = -1
538 if mode not in ('r', 'rb'):
548 if mode not in ('r', 'rb'):
539 dirname, basename = util.split(f)
549 dirname, basename = util.split(f)
540 # If basename is empty, then the path is malformed because it points
550 # If basename is empty, then the path is malformed because it points
541 # to a directory. Let the posixfile() call below raise IOError.
551 # to a directory. Let the posixfile() call below raise IOError.
542 if basename:
552 if basename:
543 if atomictemp:
553 if atomictemp:
544 util.makedirs(dirname, self.createmode, notindexed)
554 util.makedirs(dirname, self.createmode, notindexed)
545 return util.atomictempfile(f, mode, self.createmode,
555 return util.atomictempfile(f, mode, self.createmode,
546 checkambig=checkambig)
556 checkambig=checkambig)
547 try:
557 try:
548 if 'w' in mode:
558 if 'w' in mode:
549 util.unlink(f)
559 util.unlink(f)
550 nlink = 0
560 nlink = 0
551 else:
561 else:
552 # nlinks() may behave differently for files on Windows
562 # nlinks() may behave differently for files on Windows
553 # shares if the file is open.
563 # shares if the file is open.
554 with util.posixfile(f):
564 with util.posixfile(f):
555 nlink = util.nlinks(f)
565 nlink = util.nlinks(f)
556 if nlink < 1:
566 if nlink < 1:
557 nlink = 2 # force mktempcopy (issue1922)
567 nlink = 2 # force mktempcopy (issue1922)
558 except (OSError, IOError) as e:
568 except (OSError, IOError) as e:
559 if e.errno != errno.ENOENT:
569 if e.errno != errno.ENOENT:
560 raise
570 raise
561 nlink = 0
571 nlink = 0
562 util.makedirs(dirname, self.createmode, notindexed)
572 util.makedirs(dirname, self.createmode, notindexed)
563 if nlink > 0:
573 if nlink > 0:
564 if self._trustnlink is None:
574 if self._trustnlink is None:
565 self._trustnlink = nlink > 1 or util.checknlink(f)
575 self._trustnlink = nlink > 1 or util.checknlink(f)
566 if nlink > 1 or not self._trustnlink:
576 if nlink > 1 or not self._trustnlink:
567 util.rename(util.mktempcopy(f), f)
577 util.rename(util.mktempcopy(f), f)
568 fp = util.posixfile(f, mode)
578 fp = util.posixfile(f, mode)
569 if nlink == 0:
579 if nlink == 0:
570 self._fixfilemode(f)
580 self._fixfilemode(f)
571
581
572 if backgroundclose:
582 if backgroundclose:
573 if not self._backgroundfilecloser:
583 if not self._backgroundfilecloser:
574 raise error.Abort('backgroundclose can only be used when a '
584 raise error.Abort('backgroundclose can only be used when a '
575 'backgroundclosing context manager is active')
585 'backgroundclosing context manager is active')
576
586
577 fp = delayclosedfile(fp, self._backgroundfilecloser)
587 fp = delayclosedfile(fp, self._backgroundfilecloser)
578
588
579 return fp
589 return fp
580
590
581 def symlink(self, src, dst):
591 def symlink(self, src, dst):
582 self.audit(dst)
592 self.audit(dst)
583 linkname = self.join(dst)
593 linkname = self.join(dst)
584 try:
594 try:
585 os.unlink(linkname)
595 os.unlink(linkname)
586 except OSError:
596 except OSError:
587 pass
597 pass
588
598
589 util.makedirs(os.path.dirname(linkname), self.createmode)
599 util.makedirs(os.path.dirname(linkname), self.createmode)
590
600
591 if self._cansymlink:
601 if self._cansymlink:
592 try:
602 try:
593 os.symlink(src, linkname)
603 os.symlink(src, linkname)
594 except OSError as err:
604 except OSError as err:
595 raise OSError(err.errno, _('could not symlink to %r: %s') %
605 raise OSError(err.errno, _('could not symlink to %r: %s') %
596 (src, err.strerror), linkname)
606 (src, err.strerror), linkname)
597 else:
607 else:
598 self.write(dst, src)
608 self.write(dst, src)
599
609
600 def join(self, path, *insidef):
610 def join(self, path, *insidef):
601 if path:
611 if path:
602 return os.path.join(self.base, path, *insidef)
612 return os.path.join(self.base, path, *insidef)
603 else:
613 else:
604 return self.base
614 return self.base
605
615
606 opener = vfs
616 opener = vfs
607
617
608 class auditvfs(object):
618 class auditvfs(object):
609 def __init__(self, vfs):
619 def __init__(self, vfs):
610 self.vfs = vfs
620 self.vfs = vfs
611
621
612 @property
622 @property
613 def mustaudit(self):
623 def mustaudit(self):
614 return self.vfs.mustaudit
624 return self.vfs.mustaudit
615
625
616 @mustaudit.setter
626 @mustaudit.setter
617 def mustaudit(self, onoff):
627 def mustaudit(self, onoff):
618 self.vfs.mustaudit = onoff
628 self.vfs.mustaudit = onoff
619
629
620 class filtervfs(abstractvfs, auditvfs):
630 class filtervfs(abstractvfs, auditvfs):
621 '''Wrapper vfs for filtering filenames with a function.'''
631 '''Wrapper vfs for filtering filenames with a function.'''
622
632
623 def __init__(self, vfs, filter):
633 def __init__(self, vfs, filter):
624 auditvfs.__init__(self, vfs)
634 auditvfs.__init__(self, vfs)
625 self._filter = filter
635 self._filter = filter
626
636
627 def __call__(self, path, *args, **kwargs):
637 def __call__(self, path, *args, **kwargs):
628 return self.vfs(self._filter(path), *args, **kwargs)
638 return self.vfs(self._filter(path), *args, **kwargs)
629
639
630 def join(self, path, *insidef):
640 def join(self, path, *insidef):
631 if path:
641 if path:
632 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
642 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
633 else:
643 else:
634 return self.vfs.join(path)
644 return self.vfs.join(path)
635
645
636 filteropener = filtervfs
646 filteropener = filtervfs
637
647
638 class readonlyvfs(abstractvfs, auditvfs):
648 class readonlyvfs(abstractvfs, auditvfs):
639 '''Wrapper vfs preventing any writing.'''
649 '''Wrapper vfs preventing any writing.'''
640
650
641 def __init__(self, vfs):
651 def __init__(self, vfs):
642 auditvfs.__init__(self, vfs)
652 auditvfs.__init__(self, vfs)
643
653
644 def __call__(self, path, mode='r', *args, **kw):
654 def __call__(self, path, mode='r', *args, **kw):
645 if mode not in ('r', 'rb'):
655 if mode not in ('r', 'rb'):
646 raise error.Abort('this vfs is read only')
656 raise error.Abort('this vfs is read only')
647 return self.vfs(path, mode, *args, **kw)
657 return self.vfs(path, mode, *args, **kw)
648
658
649 def join(self, path, *insidef):
659 def join(self, path, *insidef):
650 return self.vfs.join(path, *insidef)
660 return self.vfs.join(path, *insidef)
651
661
652 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
662 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
653 '''yield every hg repository under path, always recursively.
663 '''yield every hg repository under path, always recursively.
654 The recurse flag will only control recursion into repo working dirs'''
664 The recurse flag will only control recursion into repo working dirs'''
655 def errhandler(err):
665 def errhandler(err):
656 if err.filename == path:
666 if err.filename == path:
657 raise err
667 raise err
658 samestat = getattr(os.path, 'samestat', None)
668 samestat = getattr(os.path, 'samestat', None)
659 if followsym and samestat is not None:
669 if followsym and samestat is not None:
660 def adddir(dirlst, dirname):
670 def adddir(dirlst, dirname):
661 match = False
671 match = False
662 dirstat = os.stat(dirname)
672 dirstat = os.stat(dirname)
663 for lstdirstat in dirlst:
673 for lstdirstat in dirlst:
664 if samestat(dirstat, lstdirstat):
674 if samestat(dirstat, lstdirstat):
665 match = True
675 match = True
666 break
676 break
667 if not match:
677 if not match:
668 dirlst.append(dirstat)
678 dirlst.append(dirstat)
669 return not match
679 return not match
670 else:
680 else:
671 followsym = False
681 followsym = False
672
682
673 if (seen_dirs is None) and followsym:
683 if (seen_dirs is None) and followsym:
674 seen_dirs = []
684 seen_dirs = []
675 adddir(seen_dirs, path)
685 adddir(seen_dirs, path)
676 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
686 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
677 dirs.sort()
687 dirs.sort()
678 if '.hg' in dirs:
688 if '.hg' in dirs:
679 yield root # found a repository
689 yield root # found a repository
680 qroot = os.path.join(root, '.hg', 'patches')
690 qroot = os.path.join(root, '.hg', 'patches')
681 if os.path.isdir(os.path.join(qroot, '.hg')):
691 if os.path.isdir(os.path.join(qroot, '.hg')):
682 yield qroot # we have a patch queue repo here
692 yield qroot # we have a patch queue repo here
683 if recurse:
693 if recurse:
684 # avoid recursing inside the .hg directory
694 # avoid recursing inside the .hg directory
685 dirs.remove('.hg')
695 dirs.remove('.hg')
686 else:
696 else:
687 dirs[:] = [] # don't descend further
697 dirs[:] = [] # don't descend further
688 elif followsym:
698 elif followsym:
689 newdirs = []
699 newdirs = []
690 for d in dirs:
700 for d in dirs:
691 fname = os.path.join(root, d)
701 fname = os.path.join(root, d)
692 if adddir(seen_dirs, fname):
702 if adddir(seen_dirs, fname):
693 if os.path.islink(fname):
703 if os.path.islink(fname):
694 for hgname in walkrepos(fname, True, seen_dirs):
704 for hgname in walkrepos(fname, True, seen_dirs):
695 yield hgname
705 yield hgname
696 else:
706 else:
697 newdirs.append(d)
707 newdirs.append(d)
698 dirs[:] = newdirs
708 dirs[:] = newdirs
699
709
700 def osrcpath():
710 def osrcpath():
701 '''return default os-specific hgrc search path'''
711 '''return default os-specific hgrc search path'''
702 path = []
712 path = []
703 defaultpath = os.path.join(util.datapath, 'default.d')
713 defaultpath = os.path.join(util.datapath, 'default.d')
704 if os.path.isdir(defaultpath):
714 if os.path.isdir(defaultpath):
705 for f, kind in osutil.listdir(defaultpath):
715 for f, kind in osutil.listdir(defaultpath):
706 if f.endswith('.rc'):
716 if f.endswith('.rc'):
707 path.append(os.path.join(defaultpath, f))
717 path.append(os.path.join(defaultpath, f))
708 path.extend(systemrcpath())
718 path.extend(systemrcpath())
709 path.extend(userrcpath())
719 path.extend(userrcpath())
710 path = [os.path.normpath(f) for f in path]
720 path = [os.path.normpath(f) for f in path]
711 return path
721 return path
712
722
713 _rcpath = None
723 _rcpath = None
714
724
715 def rcpath():
725 def rcpath():
716 '''return hgrc search path. if env var HGRCPATH is set, use it.
726 '''return hgrc search path. if env var HGRCPATH is set, use it.
717 for each item in path, if directory, use files ending in .rc,
727 for each item in path, if directory, use files ending in .rc,
718 else use item.
728 else use item.
719 make HGRCPATH empty to only look in .hg/hgrc of current repo.
729 make HGRCPATH empty to only look in .hg/hgrc of current repo.
720 if no HGRCPATH, use default os-specific path.'''
730 if no HGRCPATH, use default os-specific path.'''
721 global _rcpath
731 global _rcpath
722 if _rcpath is None:
732 if _rcpath is None:
723 if 'HGRCPATH' in os.environ:
733 if 'HGRCPATH' in os.environ:
724 _rcpath = []
734 _rcpath = []
725 for p in os.environ['HGRCPATH'].split(os.pathsep):
735 for p in os.environ['HGRCPATH'].split(os.pathsep):
726 if not p:
736 if not p:
727 continue
737 continue
728 p = util.expandpath(p)
738 p = util.expandpath(p)
729 if os.path.isdir(p):
739 if os.path.isdir(p):
730 for f, kind in osutil.listdir(p):
740 for f, kind in osutil.listdir(p):
731 if f.endswith('.rc'):
741 if f.endswith('.rc'):
732 _rcpath.append(os.path.join(p, f))
742 _rcpath.append(os.path.join(p, f))
733 else:
743 else:
734 _rcpath.append(p)
744 _rcpath.append(p)
735 else:
745 else:
736 _rcpath = osrcpath()
746 _rcpath = osrcpath()
737 return _rcpath
747 return _rcpath
738
748
739 def intrev(rev):
749 def intrev(rev):
740 """Return integer for a given revision that can be used in comparison or
750 """Return integer for a given revision that can be used in comparison or
741 arithmetic operation"""
751 arithmetic operation"""
742 if rev is None:
752 if rev is None:
743 return wdirrev
753 return wdirrev
744 return rev
754 return rev
745
755
746 def revsingle(repo, revspec, default='.'):
756 def revsingle(repo, revspec, default='.'):
747 if not revspec and revspec != 0:
757 if not revspec and revspec != 0:
748 return repo[default]
758 return repo[default]
749
759
750 l = revrange(repo, [revspec])
760 l = revrange(repo, [revspec])
751 if not l:
761 if not l:
752 raise error.Abort(_('empty revision set'))
762 raise error.Abort(_('empty revision set'))
753 return repo[l.last()]
763 return repo[l.last()]
754
764
755 def _pairspec(revspec):
765 def _pairspec(revspec):
756 tree = revset.parse(revspec)
766 tree = revset.parse(revspec)
757 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
767 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
758 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
768 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
759
769
760 def revpair(repo, revs):
770 def revpair(repo, revs):
761 if not revs:
771 if not revs:
762 return repo.dirstate.p1(), None
772 return repo.dirstate.p1(), None
763
773
764 l = revrange(repo, revs)
774 l = revrange(repo, revs)
765
775
766 if not l:
776 if not l:
767 first = second = None
777 first = second = None
768 elif l.isascending():
778 elif l.isascending():
769 first = l.min()
779 first = l.min()
770 second = l.max()
780 second = l.max()
771 elif l.isdescending():
781 elif l.isdescending():
772 first = l.max()
782 first = l.max()
773 second = l.min()
783 second = l.min()
774 else:
784 else:
775 first = l.first()
785 first = l.first()
776 second = l.last()
786 second = l.last()
777
787
778 if first is None:
788 if first is None:
779 raise error.Abort(_('empty revision range'))
789 raise error.Abort(_('empty revision range'))
780 if (first == second and len(revs) >= 2
790 if (first == second and len(revs) >= 2
781 and not all(revrange(repo, [r]) for r in revs)):
791 and not all(revrange(repo, [r]) for r in revs)):
782 raise error.Abort(_('empty revision on one side of range'))
792 raise error.Abort(_('empty revision on one side of range'))
783
793
784 # if top-level is range expression, the result must always be a pair
794 # if top-level is range expression, the result must always be a pair
785 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
795 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
786 return repo.lookup(first), None
796 return repo.lookup(first), None
787
797
788 return repo.lookup(first), repo.lookup(second)
798 return repo.lookup(first), repo.lookup(second)
789
799
790 def revrange(repo, revs):
800 def revrange(repo, revs):
791 """Yield revision as strings from a list of revision specifications."""
801 """Yield revision as strings from a list of revision specifications."""
792 allspecs = []
802 allspecs = []
793 for spec in revs:
803 for spec in revs:
794 if isinstance(spec, int):
804 if isinstance(spec, int):
795 spec = revset.formatspec('rev(%d)', spec)
805 spec = revset.formatspec('rev(%d)', spec)
796 allspecs.append(spec)
806 allspecs.append(spec)
797 m = revset.matchany(repo.ui, allspecs, repo)
807 m = revset.matchany(repo.ui, allspecs, repo)
798 return m(repo)
808 return m(repo)
799
809
800 def meaningfulparents(repo, ctx):
810 def meaningfulparents(repo, ctx):
801 """Return list of meaningful (or all if debug) parentrevs for rev.
811 """Return list of meaningful (or all if debug) parentrevs for rev.
802
812
803 For merges (two non-nullrev revisions) both parents are meaningful.
813 For merges (two non-nullrev revisions) both parents are meaningful.
804 Otherwise the first parent revision is considered meaningful if it
814 Otherwise the first parent revision is considered meaningful if it
805 is not the preceding revision.
815 is not the preceding revision.
806 """
816 """
807 parents = ctx.parents()
817 parents = ctx.parents()
808 if len(parents) > 1:
818 if len(parents) > 1:
809 return parents
819 return parents
810 if repo.ui.debugflag:
820 if repo.ui.debugflag:
811 return [parents[0], repo['null']]
821 return [parents[0], repo['null']]
812 if parents[0].rev() >= intrev(ctx.rev()) - 1:
822 if parents[0].rev() >= intrev(ctx.rev()) - 1:
813 return []
823 return []
814 return parents
824 return parents
815
825
816 def expandpats(pats):
826 def expandpats(pats):
817 '''Expand bare globs when running on windows.
827 '''Expand bare globs when running on windows.
818 On posix we assume it already has already been done by sh.'''
828 On posix we assume it already has already been done by sh.'''
819 if not util.expandglobs:
829 if not util.expandglobs:
820 return list(pats)
830 return list(pats)
821 ret = []
831 ret = []
822 for kindpat in pats:
832 for kindpat in pats:
823 kind, pat = matchmod._patsplit(kindpat, None)
833 kind, pat = matchmod._patsplit(kindpat, None)
824 if kind is None:
834 if kind is None:
825 try:
835 try:
826 globbed = glob.glob(pat)
836 globbed = glob.glob(pat)
827 except re.error:
837 except re.error:
828 globbed = [pat]
838 globbed = [pat]
829 if globbed:
839 if globbed:
830 ret.extend(globbed)
840 ret.extend(globbed)
831 continue
841 continue
832 ret.append(kindpat)
842 ret.append(kindpat)
833 return ret
843 return ret
834
844
835 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
845 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
836 badfn=None):
846 badfn=None):
837 '''Return a matcher and the patterns that were used.
847 '''Return a matcher and the patterns that were used.
838 The matcher will warn about bad matches, unless an alternate badfn callback
848 The matcher will warn about bad matches, unless an alternate badfn callback
839 is provided.'''
849 is provided.'''
840 if pats == ("",):
850 if pats == ("",):
841 pats = []
851 pats = []
842 if opts is None:
852 if opts is None:
843 opts = {}
853 opts = {}
844 if not globbed and default == 'relpath':
854 if not globbed and default == 'relpath':
845 pats = expandpats(pats or [])
855 pats = expandpats(pats or [])
846
856
847 def bad(f, msg):
857 def bad(f, msg):
848 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
858 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
849
859
850 if badfn is None:
860 if badfn is None:
851 badfn = bad
861 badfn = bad
852
862
853 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
863 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
854 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
864 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
855
865
856 if m.always():
866 if m.always():
857 pats = []
867 pats = []
858 return m, pats
868 return m, pats
859
869
860 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
870 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
861 badfn=None):
871 badfn=None):
862 '''Return a matcher that will warn about bad matches.'''
872 '''Return a matcher that will warn about bad matches.'''
863 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
873 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
864
874
865 def matchall(repo):
875 def matchall(repo):
866 '''Return a matcher that will efficiently match everything.'''
876 '''Return a matcher that will efficiently match everything.'''
867 return matchmod.always(repo.root, repo.getcwd())
877 return matchmod.always(repo.root, repo.getcwd())
868
878
869 def matchfiles(repo, files, badfn=None):
879 def matchfiles(repo, files, badfn=None):
870 '''Return a matcher that will efficiently match exactly these files.'''
880 '''Return a matcher that will efficiently match exactly these files.'''
871 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
881 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
872
882
873 def origpath(ui, repo, filepath):
883 def origpath(ui, repo, filepath):
874 '''customize where .orig files are created
884 '''customize where .orig files are created
875
885
876 Fetch user defined path from config file: [ui] origbackuppath = <path>
886 Fetch user defined path from config file: [ui] origbackuppath = <path>
877 Fall back to default (filepath) if not specified
887 Fall back to default (filepath) if not specified
878 '''
888 '''
879 origbackuppath = ui.config('ui', 'origbackuppath', None)
889 origbackuppath = ui.config('ui', 'origbackuppath', None)
880 if origbackuppath is None:
890 if origbackuppath is None:
881 return filepath + ".orig"
891 return filepath + ".orig"
882
892
883 filepathfromroot = os.path.relpath(filepath, start=repo.root)
893 filepathfromroot = os.path.relpath(filepath, start=repo.root)
884 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
894 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
885
895
886 origbackupdir = repo.vfs.dirname(fullorigpath)
896 origbackupdir = repo.vfs.dirname(fullorigpath)
887 if not repo.vfs.exists(origbackupdir):
897 if not repo.vfs.exists(origbackupdir):
888 ui.note(_('creating directory: %s\n') % origbackupdir)
898 ui.note(_('creating directory: %s\n') % origbackupdir)
889 util.makedirs(origbackupdir)
899 util.makedirs(origbackupdir)
890
900
891 return fullorigpath + ".orig"
901 return fullorigpath + ".orig"
892
902
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

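# Illustrative sketch of a typical call, mirroring what 'hg addremove
# --similarity 90' does (hypothetical wiring; the real command lives in
# commands.py). Note that similarity here is a fraction in [0, 1], while
# the command-line flag takes a percentage.
def _addremoveexample(repo):
    m = matchall(repo)
    return addremove(repo, m, '', opts={}, dry_run=False, similarity=0.9)
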
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # the badfn closure appends to 'rejected'; the name is bound on the next
    # line, before the matcher can ever invoke the callback
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

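# The branch logic above, restated as a standalone sketch: given a dirstate
# code and the walk's stat result ('st' is None when the file is gone from
# disk), which bucket does a file land in? Purely illustrative;
# _interestingfiles() is the authority.
def _classifyexample(dstate, st, auditok=True):
    if dstate == '?' and auditok:
        return 'unknown'    # candidate for 'add'
    elif dstate != 'r' and not st:
        return 'deleted'    # tracked but gone from disk
    elif dstate == 'r' and st:
        return 'forgotten'  # marked removed yet still on disk
    elif dstate == 'r' and not st:
        return 'removed'    # rename-source candidate
    elif dstate == 'a':
        return 'added'      # rename-target candidate
    return None
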
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

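# A note on the return shape (illustrative values): keys are the added
# (new) paths and values the removed (old) paths, which is exactly the
# orientation _markchanges() consumes below.
#
#   _findrenames(repo, m, ['new.txt'], ['old.txt'], 0.5)
#   # -> {'new.txt': 'old.txt'} when similar.findrenames scores >= 50%
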
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

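# Sketch of the usual caller pattern (hypothetical paths): copy the bytes
# in the working directory first, then record the copy intent in the
# dirstate.
def _dirstatecopyexample(ui, repo):
    wctx = repo[None]
    util.copyfile(repo.wjoin('a.txt'), repo.wjoin('b.txt'))
    dirstatecopy(ui, repo, wctx, 'a.txt', 'b.txt', dryrun=False)
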
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements

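# Illustrative note: .hg/requires holds one feature name per line, for
# example:
#
#   revlogv1
#   store
#   fncache
#   dotencode
#
# and the repository layer validates it roughly like
#
#   requirements = readrequires(repo.vfs, repo.supported)
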
def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            # a missing file yields None implicitly

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell us
    if a file has been replaced. If it can't, we fall back to recreating the
    object on every call (essentially the same behavior as propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

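# Illustrative sketch of consuming the decorator; real users are classes
# like localrepository, which also subclass filecache to override join().
# The class below is hypothetical and deliberately simplified.
class _filecacheexample(object):
    def __init__(self, hgdir):
        self._hgdir = hgdir
        self._filecache = {}  # required by the descriptor protocol above

    def join(self, fname):
        return os.path.join(self._hgdir, fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # re-read only when the stat info of .hg/bookmarks changes
        with open(self.join('bookmarks'), 'rb') as fp:
            return fp.read()
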
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

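# Sketch, assuming a child 'hg status' invocation: spawning a subprocess
# while holding the wlock. The child sees HG_WLOCK_LOCKER in its
# environment, so a Mercurial run inside it can inherit the lock rather
# than deadlock waiting for it.
def _wlocksubexample(repo):
    with repo.wlock():
        return wlocksub(repo, 'hg status')
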
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))

def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)

class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)

class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow it to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than the lifetime of the context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch here, or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so we can re-raise from the main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort('can only call close() when context manager '
                              'active')

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)

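# Sketch of the consumer pattern (the vfs layer does this wiring for real
# callers; paths here are hypothetical). close() queues the handle when
# background closing is active and closes synchronously otherwise, and
# __exit__ joins the worker threads so no handle outlives the block.
def _backgroundcloseexample(ui, paths):
    with backgroundfilecloser(ui, expectedcount=len(paths)) as closer:
        for p in paths:
            fh = open(p, 'rb')
            fh.read()
            closer.close(fh)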