##// END OF EJS Templates
Changeset r30320:bff5ccbe (branch: stable)
vfs: ignore EPERM at os.utime, which avoids ambiguity at renaming (issue5418)
Author: FUJIWARA Katsunori
Diff summary: @@ -1,1470 +1,1469 @@ (one line removed from scmutil.py)
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
35 if os.name == 'nt':
35 if os.name == 'nt':
36 from . import scmwindows as scmplatform
36 from . import scmwindows as scmplatform
37 else:
37 else:
38 from . import scmposix as scmplatform
38 from . import scmposix as scmplatform
39
39
40 systemrcpath = scmplatform.systemrcpath
40 systemrcpath = scmplatform.systemrcpath
41 userrcpath = scmplatform.userrcpath
41 userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Tuple subclass carrying one list of files per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    meaningful for the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath, ctx) mapping, preferring subpaths from ctx1.
    # The subpaths from ctx2 are important when the .hgsub file has
    # been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # Subpaths only present in ctx2 are handled separately below.
    missing = set(ctx2.substate) - set(ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        # count changesets hidden from the peer because they are secret
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
141
141
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    # Names that parse as integers would be ambiguous with revision
    # numbers, so reject them outright.
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # portability checking is disabled entirely
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    parsed = util.parsebool(val)
    # Windows always aborts on non-portable names.
    abort = os.name == 'nt' or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    valid = (parsed is not None or warn or abort or lowered == 'ignore')
    if not valid:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
185
185
class casecollisionauditor(object):
    '''Warn or abort when a new filename would case-fold-collide with a
    file already present in the dirstate.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only missing files are tolerated; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # only missing files are tolerated; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # ``open`` is just an alias for calling the vfs object itself
        return self.__call__

    def read(self, path):
        # read full binary content of ``path`` (relative to vfs root)
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        # read content of ``path`` as a list of lines
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        # overwrite ``path`` with ``data`` (binary mode)
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        # write an iterable of lines to ``path``
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        # append ``data`` to ``path`` (binary mode)
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            # nonexistent (or unstatable) paths are neither
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        # create a temp file under the vfs; the returned name is kept
        # relative to the vfs root (like the ``dir`` argument)
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                newstat.avoidambig(dstpath, oldstat)
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always detach, even if the caller raised
                vfs._backgroundfilecloser = None
478
477
479 class vfs(abstractvfs):
478 class vfs(abstractvfs):
480 '''Operate files relative to a base directory
479 '''Operate files relative to a base directory
481
480
482 This class is used to hide the details of COW semantics and
481 This class is used to hide the details of COW semantics and
483 remote file access from higher level code.
482 remote file access from higher level code.
484 '''
483 '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # optionally canonicalize ``base`` before storing it
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        # must come after self.base is set: the mustaudit setter builds
        # a path auditor rooted at self.base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None
494
493
    @property
    def mustaudit(self):
        # True when path auditing is enabled for this vfs
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            # audit paths against the vfs root (rejects escapes, symlinks
            # out of the tree, etc.)
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = util.always
506
505
    @util.propertycache
    def _cansymlink(self):
        # cached: whether the filesystem under self.base supports symlinks
        return util.checklink(self.base)
510
509
    @util.propertycache
    def _chmod(self):
        # cached: whether the filesystem under self.base honours the
        # executable bit
        return util.checkexec(self.base)
514
513
515 def _fixfilemode(self, name):
514 def _fixfilemode(self, name):
516 if self.createmode is None or not self._chmod:
515 if self.createmode is None or not self._chmod:
517 return
516 return
518 os.chmod(name, self.createmode & 0o666)
517 os.chmod(name, self.createmode & 0o666)
519
518
    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            # reject OS-unsafe filenames before touching the filesystem
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the hardlink count of the target; -1 means "unknown"
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        # truncating write: no need to preserve hardlinks
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    # target does not exist yet: create parent directories
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks by replacing the file with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # freshly created file: apply the configured creation mode
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp
    def symlink(self, src, dst):
        # ``dst`` is relative to the vfs root; audit it before any I/O
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            # best effort: fine if the link did not exist yet
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # filesystem without symlink support: store the link target
            # as plain file content instead
            self.write(dst, src)
625 def join(self, path, *insidef):
624 def join(self, path, *insidef):
626 if path:
625 if path:
627 return os.path.join(self.base, path, *insidef)
626 return os.path.join(self.base, path, *insidef)
628 else:
627 else:
629 return self.base
628 return self.base
630
629
opener = vfs  # alternate module-level name for vfs — NOTE(review): presumably a backwards-compatibility alias; confirm against callers
class auditvfs(object):
    """Base class for vfs wrappers: delegates ``mustaudit`` and
    ``options`` to a wrapped vfs instance (see filtervfs/readonlyvfs)."""
    def __init__(self, vfs):
        # the wrapped vfs that audit/options state is forwarded to
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before it reaches the inner vfs
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # open through the inner vfs with the filtered name
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        combined = self.vfs.reljoin(path, *insidef)
        return self.vfs.join(self._filter(combined))
filteropener = filtervfs  # alternate name for filtervfs — NOTE(review): presumably a backwards-compatibility alias; confirm against callers
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes pass through; anything else is refused
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort(_('this vfs is read only'))

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # propagate walk errors only for the root itself; deeper errors
        # are ignored so one unreadable subtree does not abort the scan
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat if unseen; True means "new directory"
            st = os.stat(dirname)
            if any(samestat(st, seenstat) for seenstat in dirlst):
                return False
            dirlst.append(st)
            return True
    else:
        # without samestat we cannot detect symlink cycles, so do not
        # follow symlinks at all
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            patchesdir = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(patchesdir, '.hg')):
                yield patchesdir # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                full = os.path.join(root, d)
                if adddir(seen_dirs, full):
                    if os.path.islink(full):
                        # recurse through the link with cycle tracking on
                        for hgname in walkrepos(full, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
def osrcpath():
    '''return default os-specific hgrc search path'''
    paths = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        # bundled default config fragments come first
        paths.extend(os.path.join(defaultpath, f)
                     for f, kind in osutil.listdir(defaultpath)
                     if f.endswith('.rc'))
    paths.extend(systemrcpath())
    paths.extend(userrcpath())
    return [os.path.normpath(f) for f in paths]
_rcpath = None  # lazily-populated cache for rcpath()
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in encoding.environ:
            _rcpath = []
            # fix: read from encoding.environ, consistent with the
            # membership test above (os.environ may disagree with
            # encoding.environ on platforms where the environment is
            # re-encoded, e.g. Windows)
            for p in encoding.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation (``None`` maps to the working-directory sentinel
    ``wdirrev``)."""
    return wdirrev if rev is None else rev
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx, falling back to
    ``default`` for an empty spec; abort if the spec matches nothing."""
    # 0 is a valid revision even though it is falsy
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
def _pairspec(revspec):
    # True if the top-level operator of the parsed revset is a range
    # form, i.e. the user explicitly asked for a pair of revisions
    tree = revset.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
def revpair(repo, revs):
    """Resolve ``revs`` to a ``(node, node-or-None)`` pair; with no revs,
    return the first working-directory parent and ``None``."""
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    first = second = None
    if l:
        if l.isascending():
            first, second = l.min(), l.max()
        elif l.isdescending():
            first, second = l.max(), l.min()
        else:
            first, second = l.first(), l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases.  The revsets
    in ``specs`` are combined with a chained ``OR``; an empty ``specs``
    yields an empty result.  Integer entries are treated as revision
    numbers.  Specs are assumed pre-formatted — use
    ``revset.formatspec()`` to expand arguments first.

    Returns a ``revset.abstractsmartset`` (a list-like interface over
    integer revisions).
    """
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    matcher = revset.matchany(repo.ui, allspecs, repo)
    return matcher(repo)
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a real merge: both parents matter
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    onlyparent = parents[0]
    # linear history: an immediately-preceding parent is "obvious"
    return [] if onlyparent.rev() >= intrev(ctx.rev()) - 1 else parents
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never globbed
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # glob matched nothing: keep the original pattern
            expanded.append(kindpat)
    return expanded
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default bad-match handler: warn via the repo ui
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'),
                  badfn=badfn if badfn is not None else bad)

    if m.always():
        pats = []
    return m, pats
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # the "always" matcher lets callers skip per-file pattern checks
    return matchmod.always(repo.root, repo.getcwd())
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact matcher: no pattern compilation, just set membership on files
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    configured = ui.config('ui', 'origbackuppath', None)
    if configured is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured backup dir
    relpath = os.path.relpath(filepath, start=repo.root)
    backuppath = repo.wjoin(configured, relpath)

    backupdir = repo.vfs.dirname(backuppath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return backuppath + ".orig"
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and remove missing ones, recursing into subrepos.

    Returns 1 if any file was rejected or a subrepo reported a problem,
    0 otherwise.  ``similarity`` (0..1) enables rename detection.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        # recurse when --subrepos was given, the subrepo was named
        # explicitly, or the match reaches files inside it
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only explicitly-named files produce a warning; everything bad
        # is recorded so we can report failure at the end
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            # an explicitly-requested file failed: report failure
            return 1
    return ret
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # the lambda closes over ``rejected`` which is assigned on the next
    # line; the closure is only invoked later, after it exists
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            # a requested file could not be processed: report failure
            return 1
    return 0
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # dirstate codes: '?' untracked, 'r' removed, 'a' added;
    # ``st`` is the stat result (falsy when the file is gone from disk)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # rename detection disabled
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        exactboth = matcher.exact(old) and matcher.exact(new)
        # stay quiet only for exact, non-verbose matches
        if repo.ui.verbose or not exactboth:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1072
1071
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        # drop vanished files first, then start tracking the new ones
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
1082
1081
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc:
        # copying back a copy: just make sure dst is tracked normally
        if not dryrun and ds[dst] not in 'mn':
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # source was only added in the working dir, no copy data to keep
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
    if ds[dst] in '?r':
        # dst unknown or removed: (re)add it, no copy record possible
        if not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1101
1100
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # a requirement must start with an alphanumeric character;
        # anything else means the file itself is damaged
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1120
1119
def writerequires(opener, requirements):
    """Write the requirements to .hg/requires, one per line, sorted."""
    with opener('requires', 'w') as fp:
        fp.writelines('%s\n' % r for r in sorted(requirements))
1125
1124
class filecachesubentry(object):
    """Tracks stat info of a single file for cache invalidation."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means "not known yet"; resolved lazily once a stat succeeds
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-record the file's stat info, if stat-based caching can work."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether the filesystem can reliably report file replacement."""
        # unknown: optimistically assume yes until a stat tells us otherwise
        return True if self._cacheable is None else self._cacheable

    def changed(self):
        """True if the file appears changed since the last recorded stat."""
        if not self.cacheable():
            # stat info can't be trusted, always report a change
            return True

        newstat = filecachesubentry.stat(self.path)

        # a fresh stat may finally resolve the "unknown" cacheability
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if the file is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1180
1179
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a set of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1197
1196
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    The decorated method's host object must provide a ``_filecache`` dict
    and (by default) a ``join`` method -- see ``join`` below.
    '''
    def __init__(self, *paths):
        # relative paths of the files backing the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name,
        # then install this descriptor in its place
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        """Return the cached value, recomputing it if any backing file changed."""
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # (presence in obj.__dict__ means the value is already current for
        # this access cycle; invariant: X in __dict__ implies X in _filecache)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        """Replace the cached value without recomputing from disk."""
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            # (stat=False: don't record stat info for a value set directly)
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        """Drop the cached value so the next access recomputes it."""
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1276
1275
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd through repo.ui.system while making lock inheritable.

    The inheritance token is exported to the child process via the
    environment variable named by envvar.  Raises
    LockInheritanceContractViolation when lock is not currently held.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as token:
        environ[envvar] = token
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1286
1285
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1295
1294
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1302
1301
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1308
1307
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # must bypass our own __setattr__, which would otherwise forward
        # the assignment to the (not yet stored) wrapped handle
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        # delegate any attribute we don't define to the wrapped handle
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        # attribute writes also go to the wrapped handle
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # subclasses decide what happens when the handle is released
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        # subclasses decide what close() means (delay, post-check, ...)
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1334
1333
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # closer decides when the underlying handle actually closes;
        # object.__setattr__ bypasses the proxy's attribute forwarding
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # hand the handle to the closer instead of closing inline
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1349
1348
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # _running: worker threads are alive and accepting handles
        # _entered: the context manager was entered (close() requires it)
        self._running = False
        self._entered = False
        self._threads = []
        # first exception raised by a worker; re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        # bounded queue so producers block instead of exhausting fds
        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # signal workers to drain and exit
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so we notice _running going False promptly
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1440
1439
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot stat info before any writes, to compare at close time;
        # object.__setattr__ bypasses the proxy's attribute forwarding
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        """Advance the file's mtime if its new stat is ambiguous.

        If the post-write stat is ambiguous against the pre-write one
        (see util.filestat.isambig), stat-only cache validation could
        miss the change, so bump mtime by one second (kept within
        31 bits) to disambiguate.
        """
        oldstat = self._oldstat
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                try:
                    os.utime(self._origfh.name, (advanced, advanced))
                except OSError as err:
                    if err.errno != errno.EPERM:
                        raise
                    # utime() fails with EPERM when we aren't the file's
                    # owner (only the owner may set arbitrary timestamps).
                    # Give up on disambiguation in that case rather than
                    # aborting the whole operation (issue5418).

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now