##// END OF EJS Templates
scmutil: ignore EPERM at os.utime, which avoids ambiguity at closing...
FUJIWARA Katsunori -
r30321:e0ff4799 stable
parent child Browse files
Show More
@@ -1,1469 +1,1468 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
35 if os.name == 'nt':
35 if os.name == 'nt':
36 from . import scmwindows as scmplatform
36 from . import scmwindows as scmplatform
37 else:
37 else:
38 from . import scmposix as scmplatform
38 from . import scmposix as scmplatform
39
39
40 systemrcpath = scmplatform.systemrcpath
40 systemrcpath = scmplatform.systemrcpath
41 userrcpath = scmplatform.userrcpath
41 userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Generator yielding (subpath, subrepo) pairs: subpaths present in both
    contexts (or only ctx1) are yielded sorted by path, followed by empty
    subrepos (in arbitrary set order) for paths only present in ctx2.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # paths present only in ctx2; handled separately below
    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # py2 dict.iteritems(); sorted for deterministic yield order
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.

    If any of the excluded nodes are secret (and not extinct), the message
    mentions how many secret changesets were ignored.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleaned
                # up.
                continue
            ctx = repo[n]
            # count only live secret changesets; extinct ones are not
            # interesting to report
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141
141
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is not acceptable as a new label name."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        # not an integer: acceptable
        return
    raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for newline in ('\r', '\n'):
        if newline in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config

    Always rejects filenames containing '\\n' or '\\r' (via checkfilename);
    Windows-portability problems are reported per the ui.portablefilenames
    setting (warn, abort, or ignore).
    '''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        # util.checkwinfilename returns a message, or None if portable
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    asbool = util.parsebool(value)
    # on Windows non-portable names can never be created, so always abort
    abort = lowered == 'abort' or os.name == 'nt'
    warn = asbool or lowered == 'warn'
    if not (warn or abort or lowered == 'ignore') and asbool is None:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
185
185
class casecollisionauditor(object):
    """Warn or abort when a new filename would case-fold-collide with a
    file already tracked in the dirstate.

    Instances are callable; call with each candidate filename.
    """
    def __init__(self, ui, abort, dirstate):
        # abort: if True, raise on collision instead of warning
        self._ui = ui
        self._abort = abort
        # join/split through '\0' so encoding.lower is called once for the
        # whole file list rather than per file
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        """Check filename ``f``; warn or abort on a case-folding collision."""
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a file already in the dirstate under this exact name is not a
        # collision with itself
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.

    Returns None when the view filters no revisions at all, or a binary
    20-byte digest otherwise.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    # only revisions at or below maxrev participate, so caches keyed by a
    # tiprev see a stable hash
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # NOTE(review): feeds a native str to sha1 — py2-only as written;
            # would need bytes under py3
            s.update('%s;' % rev)
        key = s.digest()
    return key
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Subclasses provide ``__call__`` (open a file relative to the vfs root)
    and ``join`` (map a vfs-relative path to a real path); everything here
    is implemented in terms of those two.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only swallow "file not found"; propagate real I/O errors
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # only swallow "file not found"; propagate real I/O errors
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # ``vfs.open`` is just an alias for calling the vfs object itself
        return self.__call__

    def read(self, path):
        """Return the full binary content of ``path``."""
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        """Return the content of ``path`` as a list of lines."""
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        """Write ``data`` to ``path``, truncating any existing content."""
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        """Write the sequence of lines ``data`` to ``path``."""
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        """Append ``data`` to the end of ``path``."""
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        """Change the mode bits of ``path`` (relative to the vfs root)."""
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        """Return True if ``path`` exists (follows symlinks)."""
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        """Stat the already-open file object ``fp``."""
        return util.fstat(fp)

    def isdir(self, path=None):
        """Return True if ``path`` is a directory."""
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        """Return True if ``path`` is a regular file (follows symlinks)."""
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        """Return True if ``path`` is a symbolic link."""
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            # missing or unreadable: neither a file nor a link
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        """Return True if ``path`` exists (does not follow symlinks)."""
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        """Stat ``path`` without following symlinks."""
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        """Return the list of entries in directory ``path``."""
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        """Create directory ``path``; see util.makedir for ``notindexed``."""
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        """Create ``path`` and any missing intermediate directories."""
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        """Create a lock file at ``path`` containing ``info``."""
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        """Create a single directory ``path`` (parents must exist)."""
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        """Create a temporary file under the vfs root.

        Returns (fd, name) like tempfile.mkstemp, with ``name`` relative
        to the vfs root rather than an absolute path.
        """
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            # keep the caller-supplied directory prefix on the result
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        """List directory ``path`` via osutil.listdir (optionally with stat)."""
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        """Read the content of the lock file at ``path``."""
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        # only pay the extra stat calls when ambiguity checking is requested
        # and the destination already exists
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                newstat.avoidambig(dstpath, oldstat)
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        """Return the target of the symlink at ``path``."""
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove itself
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    # already writable, so the failure was something else
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        """Set the symlink (``l``) and executable (``x``) flags on ``path``."""
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        """Stat ``path`` (follows symlinks)."""
        return os.stat(self.join(path))

    def unlink(self, path=None):
        """Remove the file at ``path``."""
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        """Remove ``path`` and prune now-empty parent directories."""
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        """Set access/modification times of ``path`` to ``t`` (or now)."""
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always detach the closer, even if the caller raised
                vfs._backgroundfilecloser = None
477
477
478 class vfs(abstractvfs):
478 class vfs(abstractvfs):
479 '''Operate files relative to a base directory
479 '''Operate files relative to a base directory
480
480
481 This class is used to hide the details of COW semantics and
481 This class is used to hide the details of COW semantics and
482 remote file access from higher level code.
482 remote file access from higher level code.
483 '''
483 '''
484 def __init__(self, base, audit=True, expandpath=False, realpath=False):
484 def __init__(self, base, audit=True, expandpath=False, realpath=False):
485 if expandpath:
485 if expandpath:
486 base = util.expandpath(base)
486 base = util.expandpath(base)
487 if realpath:
487 if realpath:
488 base = os.path.realpath(base)
488 base = os.path.realpath(base)
489 self.base = base
489 self.base = base
490 self.mustaudit = audit
490 self.mustaudit = audit
491 self.createmode = None
491 self.createmode = None
492 self._trustnlink = None
492 self._trustnlink = None
493
493
494 @property
494 @property
495 def mustaudit(self):
495 def mustaudit(self):
496 return self._audit
496 return self._audit
497
497
498 @mustaudit.setter
498 @mustaudit.setter
499 def mustaudit(self, onoff):
499 def mustaudit(self, onoff):
500 self._audit = onoff
500 self._audit = onoff
501 if onoff:
501 if onoff:
502 self.audit = pathutil.pathauditor(self.base)
502 self.audit = pathutil.pathauditor(self.base)
503 else:
503 else:
504 self.audit = util.always
504 self.audit = util.always
505
505
506 @util.propertycache
506 @util.propertycache
507 def _cansymlink(self):
507 def _cansymlink(self):
508 return util.checklink(self.base)
508 return util.checklink(self.base)
509
509
510 @util.propertycache
510 @util.propertycache
511 def _chmod(self):
511 def _chmod(self):
512 return util.checkexec(self.base)
512 return util.checkexec(self.base)
513
513
514 def _fixfilemode(self, name):
514 def _fixfilemode(self, name):
515 if self.createmode is None or not self._chmod:
515 if self.createmode is None or not self._chmod:
516 return
516 return
517 os.chmod(name, self.createmode & 0o666)
517 os.chmod(name, self.createmode & 0o666)
518
518
    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks before writing (copy-on-write)
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # file was (re)created above: apply createmode to it
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp
604
604
    def symlink(self, src, dst):
        # Create a symlink at ``dst`` (relative to the vfs root) pointing at
        # ``src``; when symlinks are unsupported, fall back to writing
        # ``src`` as the file's content.
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass  # best-effort removal; fine if linkname didn't exist

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)
623
623
624 def join(self, path, *insidef):
624 def join(self, path, *insidef):
625 if path:
625 if path:
626 return os.path.join(self.base, path, *insidef)
626 return os.path.join(self.base, path, *insidef)
627 else:
627 else:
628 return self.base
628 return self.base
629
629
opener = vfs  # legacy alias kept for backward compatibility
631
631
class auditvfs(object):
    '''Base class for vfs wrappers: delegates auditing and option state
    to the wrapped vfs.'''

    def __init__(self, vfs):
        # the wrapped vfs all state is forwarded to
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
651
651
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before delegation
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the path, then hand off to the wrapped vfs
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
667
667
filteropener = filtervfs  # legacy alias kept for backward compatibility
669
669
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through to the wrapped vfs
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
683
683
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def onerror(err):
        # only errors on the root we were asked to walk are fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(statlist, dirname):
            # record dirname's stat; return False when it was seen before
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, seen) for seen in statlist):
                return False
            statlist.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=onerror):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
731
731
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        # bundled default.d/*.rc files come first
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
744
744
_rcpath = None  # memoized result of rcpath(); computed on first call
746
746
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in encoding.environ:
            _rcpath = []
            # Read the value from encoding.environ as well: the membership
            # test above uses encoding.environ, and reading os.environ here
            # (as the old code did) could disagree with it — wrong encoding,
            # or even a KeyError on platforms where the two mappings differ.
            for p in encoding.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
770
770
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None means the working directory; map it to the wdir pseudo-rev
    return wdirrev if rev is None else rev
777
777
def revsingle(repo, revspec, default='.'):
    """Return the (last) changectx matched by ``revspec``.

    Falls back to ``repo[default]`` for an empty spec (but not 0);
    aborts when the revset matches nothing.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
786
786
def _pairspec(revspec):
    # True when the top-level operator of the parsed spec is a range form
    rangeops = ('range', 'rangepre', 'rangepost', 'rangeall')
    tree = revset.parse(revspec)
    return tree and tree[0] in rangeops
790
790
def revpair(repo, revs):
    """Resolve ``revs`` to a pair of nodes ``(first, second)``.

    ``second`` is None when the input names a single revision that is not
    a range expression; otherwise both ends of the resolved set are
    returned.  Aborts on an empty range or an empty side of a range.
    """
    if not revs:
        # no spec: working directory's first parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
820
820
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are treated as revision numbers
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    matcher = revset.matchany(repo.ui, allspecs, repo)
    return matcher(repo)
849
849
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps  # real merge: both parents matter
    if repo.ui.debugflag:
        # debug output always shows both slots, padding with null
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx.rev()) - 1:
        # linear history: parent is just the previous rev, omit it
        return []
    return ps
865
865
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        # an unmatched glob falls back to the literal pattern
        expanded.extend(matches if matches else [kindpat])
    return expanded
884
884
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # late-binds ``m`` assigned below; only invoked once m exists
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # a match-everything matcher came from no effective patterns
        pats = []
    return m, pats
909
909
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # thin wrapper: discard the expanded pattern list from matchandpats()
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
914
914
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
918
918
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
922
922
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backupdir = ui.config('ui', 'origbackuppath', None)
    if backupdir is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured backup dir
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backupdir, relpath)

    parent = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(parent):
        ui.note(_('creating directory: %s\n') % parent)
        util.makedirs(parent)

    return fullorigpath + ".orig"
942
942
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and forget missing ones, guessing renames by similarity.

    Returns 1 when a file explicitly named by the matcher was rejected
    or a subrepo addremove failed, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # recurse into subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # remember files the walk rejected; warn only for explicit names
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    # report what will be added/removed
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # explicit arguments that could not be processed count as failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
998
998
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: the lambda closes over ``rejected`` before it is assigned; this
    # works because the closure is only invoked later, during the walk.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # any explicitly-named file that was rejected counts as failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1027
1027
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate codes: '?' untracked, 'r' removed, 'a' added;
        # st is the stat result, or falsy when the file is gone from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but still present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1056
1056
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping each added name to the removed name it was
    detected as a rename of.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for src, dst, score in candidates:
        # stay quiet about exactly-matched files unless verbose is set
        if (repo.ui.verbose or not matcher.exact(src)
            or not matcher.exact(dst)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(src), matcher.rel(dst),
                            score * 100))
        renames[dst] = src
    return renames
1071
1071
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    ctx = repo[None]
    with repo.wlock():
        # forget the deleted files first, then register the new ones,
        # then record the copy sources for detected renames
        ctx.forget(deleted)
        ctx.add(unknown)
        for dst, src in renames.iteritems():
            ctx.copy(src, dst)
1081
1081
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.

    ui     - ui object used for warnings
    repo   - repository whose dirstate is consulted/updated
    wctx   - working context used to record adds and copies
    src    - repo-relative source path
    dst    - repo-relative destination path
    dryrun - when True, warn but do not modify any state
    cwd    - directory used to relativize paths in messages
    """
    # follow an existing copy record, so chained copies point back at the
    # original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm' (merged) and 'n' (normal) need no change; any other state is
        # reset by re-registering dst for normal-state lookup
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added in the working directory, so there is
            # no committed revision to attach copy data to
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            # still make sure the destination becomes tracked
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1100
1100
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for entry in requirements:
        if entry in supported:
            continue
        # an empty entry, or one not starting with an alphanumeric
        # character, cannot be a valid requirement name
        if not entry or not entry[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(entry)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1119
1119
def writerequires(opener, requirements):
    """Write the given requirement names to the 'requires' file, one per
    line in sorted order, using opener to create the file."""
    with opener('requires', 'w') as fp:
        for name in sorted(requirements):
            fp.write("%s\n" % name)
1124
1124
class filecachesubentry(object):
    # Tracks the stat information of a single file backing a filecache
    # entry.  'cachestat' holds the last recorded stat result (or None),
    # and '_cacheable' remembers whether that stat info can be trusted to
    # detect replacement at all; None means "not determined yet".
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-record the file's current stat info as the new baseline."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Return whether stat info can be used to detect changes."""
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file looks changed since the last refresh.

        Also returns True whenever stat info is unusable for caching,
        which forces callers to recompute instead of trusting staleness.
        """
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """stat() 'path' via util.cachestat, returning None when the file
        does not exist; any other OSError propagates."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1179
1179
class filecacheentry(object):
    """Aggregates one filecachesubentry per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        """Re-record stat info for every tracked path."""
        for sub in self._entries:
            sub.refresh()
1196
1196
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name,
        # then return self so the descriptor replaces the function
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            # invariant: a value in obj.__dict__ implies a tracking entry
            # in obj._filecache (see __set__)
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the cached value; the _filecache entry stays so a later
        # __get__ can re-validate against the recorded stat info
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1275
1275
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd as a subprocess with 'lock' inheritable via envvar.

    Raises LockInheritanceContractViolation unless 'lock' is currently
    held; returns the exit code from repo.ui.system."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    env = {} if environ is None else environ
    with lock.inherit() as lockname:
        env[envvar] = lockname
        return repo.ui.system(cmd, environ=env, *args, **kwargs)
1285
1285
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1294
1294
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1301
1301
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1307
1307
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # use object.__setattr__ to bypass our own __setattr__, which
        # forwards everything to the wrapped handle
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        # delegate unknown attribute access to the wrapped file handle
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # subclasses must hook closing here
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        # subclasses must hook closing here
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1333
1333
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # object.__setattr__ bypasses the base class's __setattr__, which
        # would otherwise set the attribute on the wrapped handle
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # hand the file to the closer instead of closing synchronously
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1348
1348
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # when _running stays False, close() falls back to synchronous
        # closing (see close() below)
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # signal workers to stop once the queue drains
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                # queue drained; only exit once shutdown was requested
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1439
1439
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # record stat info before any writes, to compare against at close
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        oldstat = self._oldstat
        # if the file didn't exist before, there is nothing to compare to
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                newstat.avoidambig(self._origfh.name, oldstat)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now