scmutil: factor out common logic of delayclosedfile to reuse it...
FUJIWARA Katsunori
r29994:0c40e64d default
@@ -1,1420 +1,1433 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import contextlib
import errno
import glob
import hashlib
import os
import re
import shutil
import stat
import tempfile
import threading

from .i18n import _
from .node import wdirrev
from . import (
    encoding,
    error,
    match as matchmod,
    osutil,
    pathutil,
    phases,
    revset,
    similar,
    util,
)

if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)

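# Editor's illustrative sketch (not part of this changeset): consuming a
# status tuple. It behaves both as a plain 7-tuple and via the named
# properties; the ``repo`` object here is an assumed localrepo instance.
def _example_status_usage(repo):
    st = repo.status(unknown=True)
    for f in st.modified:
        repo.ui.write('M %s\n' % f)
    modified, added, removed = st[0], st[1], st[2]  # tuple access also works
    return len(modified) + len(added) + len(removed)
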
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleaned
                # up.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

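# Editor's illustrative sketch (not part of this changeset): auditing a
# batch of to-be-added filenames for case-folding collisions. ``repo`` is
# an assumed localrepo instance.
def _example_casecollision_check(repo, newfiles):
    audit = casecollisionauditor(repo.ui, abort=False, dirstate=repo.dirstate)
    for f in newfiles:
        audit(f)  # warns (or aborts) on 'README' vs 'readme' style clashes
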
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        key = s.digest()
    return key

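# Editor's illustrative sketch (not part of this changeset): how a cache
# might use filteredhash() for validation, per the docstring above. The
# cache-key layout shown is hypothetical.
def _example_cachekey(repo):
    tiprev = len(repo.changelog) - 1
    tipnode = repo.changelog.node(tiprev)
    # identical (tipnode, tiprev) but a different filtered set yields a
    # different key, invalidating the cache as intended
    return (tipnode, tiprev, filteredhash(repo, tiprev))
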
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        return self.__call__

    def read(self, path):
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that paths stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directory under path

        ``dirpath`` is relative to the root of this vfs. This uses
        ``os.sep`` as path separator, even if you specify a POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                vfs._backgroundfilecloser = None

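# Editor's illustrative sketch (not part of this changeset): writing many
# files with background closing, per the docstring above. ``repo.vfs`` and
# the (name, data) list are assumed.
def _example_backgroundclosing(repo, files):
    with repo.vfs.backgroundclosing(repo.ui, expectedcount=len(files)):
        for name, data in files:
            # close() is handed off to a background thread
            repo.vfs.write(name, data, backgroundclose=True)
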
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictempfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

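# Editor's illustrative sketch (not part of this changeset): basic vfs use.
# The base path is hypothetical; atomictemp gives all-or-nothing replacement.
def _example_vfs_usage():
    v = vfs('/tmp/example-repo/.hg', audit=False)
    v.write('foo', 'bar\n')          # creates missing directories as needed
    data = v.tryread('missing')      # '' instead of IOError for absent files
    fp = v('foo', 'wb', atomictemp=True)
    fp.write('baz\n')
    fp.close()                       # renamed into place on close
    return data
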
opener = vfs

class auditvfs(object):
    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value

class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

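# Editor's illustrative sketch (not part of this changeset): a filtervfs
# that rewrites every path before it reaches the wrapped vfs, similar in
# spirit to the store's filename encoding. The filter shown is hypothetical.
def _example_filtervfs(basevfs):
    fvfs = filtervfs(basevfs, lambda p: p.replace(' ', '_'))
    fvfs.write('a file', 'data\n')   # actually writes 'a_file'
    return fvfs.join('a file')       # .../a_file
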
filteropener = filtervfs

class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

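# Editor's illustrative sketch (not part of this changeset): listing all
# repositories below a directory, following symlinks. The path is
# hypothetical.
def _example_walkrepos():
    for repopath in walkrepos('/srv/hg', followsym=True):
        print(repopath)
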
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    if rev is None:
        return wdirrev
    return rev

def revsingle(repo, revspec, default='.'):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revset.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revset.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)

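# Editor's illustrative sketch (not part of this changeset): executing
# user-supplied revsets with revrange(), per the docstring above. The
# specs are hypothetical.
def _example_revrange(repo):
    revs = revrange(repo, ['draft()', 'heads(default)', 42])
    for rev in revs:       # the smartset iterates integer revisions
        repo.ui.write('%d\n' % rev)
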
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

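# Editor's illustrative sketch (not part of this changeset): building a
# matcher from command-line style patterns and walking the working copy
# with it. The pattern is hypothetical.
def _example_match(repo):
    wctx = repo[None]
    m = match(wctx, pats=['glob:*.py'])
    return [f for f in wctx.walk(m)]   # working-copy files matching *.py
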
910 def matchall(repo):
910 def matchall(repo):
911 '''Return a matcher that will efficiently match everything.'''
911 '''Return a matcher that will efficiently match everything.'''
912 return matchmod.always(repo.root, repo.getcwd())
912 return matchmod.always(repo.root, repo.getcwd())
913
913
914 def matchfiles(repo, files, badfn=None):
914 def matchfiles(repo, files, badfn=None):
915 '''Return a matcher that will efficiently match exactly these files.'''
915 '''Return a matcher that will efficiently match exactly these files.'''
916 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
916 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
917
917
918 def origpath(ui, repo, filepath):
918 def origpath(ui, repo, filepath):
919 '''customize where .orig files are created
919 '''customize where .orig files are created
920
920
921 Fetch user defined path from config file: [ui] origbackuppath = <path>
921 Fetch user defined path from config file: [ui] origbackuppath = <path>
922 Fall back to default (filepath) if not specified
922 Fall back to default (filepath) if not specified
923 '''
923 '''
924 origbackuppath = ui.config('ui', 'origbackuppath', None)
924 origbackuppath = ui.config('ui', 'origbackuppath', None)
925 if origbackuppath is None:
925 if origbackuppath is None:
926 return filepath + ".orig"
926 return filepath + ".orig"
927
927
928 filepathfromroot = os.path.relpath(filepath, start=repo.root)
928 filepathfromroot = os.path.relpath(filepath, start=repo.root)
929 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
929 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
930
930
931 origbackupdir = repo.vfs.dirname(fullorigpath)
931 origbackupdir = repo.vfs.dirname(fullorigpath)
932 if not repo.vfs.exists(origbackupdir):
932 if not repo.vfs.exists(origbackupdir):
933 ui.note(_('creating directory: %s\n') % origbackupdir)
933 ui.note(_('creating directory: %s\n') % origbackupdir)
934 util.makedirs(origbackupdir)
934 util.makedirs(origbackupdir)
935
935
936 return fullorigpath + ".orig"
936 return fullorigpath + ".orig"
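
# For example, with the following configuration in an hgrc (the directory
# name is an arbitrary choice for illustration):
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# a backup of 'dir/file' is written to '.hg/origbackups/dir/file.orig'
# instead of 'dir/file.orig' next to the original.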

def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate states: '?' untracked, 'a' marked added, 'r' marked
        # removed; 'st' is the stat result, falsy if the file is gone
        # from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
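
# For instance, with similarity=0.9 a removed file and an added file whose
# contents score at least 90% similar are recorded as a rename (callers
# such as addremove derive this threshold from the user's --similarity
# option). A sketch with assumed file names:
#
#     renames = _findrenames(repo, matchall(repo),
#                            ['new/name.py'], ['old/name.py'], 0.9)
#     # -> {'new/name.py': 'old/name.py'} when similar enough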

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from
    src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
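
# .hg/requires is a plain text file listing one feature name per line,
# for example:
#
#     dotencode
#     fncache
#     revlogv1
#     store
#
# readrequires(repo.vfs, supported) returns the parsed set, or raises
# RequirementError when an entry is missing from 'supported'.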

def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell us
    if a file has been replaced. If it can't, we fall back to recreating the
    object on every call (essentially the same behavior as propertycache).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
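
# A sketch of how a cached property might be declared (the subclass and
# attribute names below are assumptions for illustration; localrepo
# defines its own filecache variants):
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class repo(object):
#         def __init__(self):
#             self._filecache = {}
#             self.vfs = ...  # a vfs rooted at .hg (illustrative)
#
#         @repofilecache('bookmarks')
#         def bookmarks(self):
#             # parsebookmarks is a stand-in for a real parser
#             return parsebookmarks(self.vfs.read('bookmarks'))
#
# The decorated function is re-run only when the stat info of
# '.hg/bookmarks' says the file was replaced or appended to.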

def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
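
# A hedged sketch (the command string is only illustrative): while the
# wlock is held, a child hg process spawned this way can inherit the lock
# through the HG_WLOCK_LOCKER environment variable instead of blocking
# on it.
#
#     with repo.wlock():
#         rc = wlocksub(repo, 'hg debuglocks')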

def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)

class closewrapbase(object):
    """Base class for wrappers that hook the closing of a file object.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
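
# How the pieces fit together (a sketch; the vfs layer is the real caller,
# and 'ui' plus the file name are assumed): a delayclosedfile wraps a raw
# file handle and delegates close() to a 'closer' object providing
# close(fh), typically the backgroundfilecloser defined below.
#
#     fh = open('somefile', 'wb')
#     with backgroundfilecloser(ui) as closer:
#         proxy = delayclosedfile(fh, closer)
#         proxy.write('data')  # attribute access is forwarded to fh
#         proxy.close()        # fh is queued for closing on a worker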

class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing, so only enable by default
        # on that platform, but allow it to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount
        # closes. (We don't currently have any callers that don't know their
        # file count.)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than the lifetime of the context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch errors here or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash the exception so it can be re-raised from the main
                    # thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we
        # fail fast. Otherwise we may potentially go on for minutes until the
        # error is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
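
# The configuration knobs read above (values shown are the defaults;
# 'backgroundclose' itself defaults to True only on Windows):
#
#     [worker]
#     backgroundclose = True
#     backgroundcloseminfilecount = 2048
#     backgroundclosethreadcount = 4
#     backgroundclosemaxqueue = 384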