##// END OF EJS Templates
scmutil: delete extra newline at EOF...
Augie Fackler -
r29336:9368ed12 default
parent child Browse files
Show More
@@ -1,1391 +1,1390
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import os
13 import os
14 import re
14 import re
15 import shutil
15 import shutil
16 import stat
16 import stat
17 import tempfile
17 import tempfile
18 import threading
18 import threading
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import wdirrev
21 from .node import wdirrev
22 from . import (
22 from . import (
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 osutil,
26 osutil,
27 pathutil,
27 pathutil,
28 phases,
28 phases,
29 revset,
29 revset,
30 similar,
30 similar,
31 util,
31 util,
32 )
32 )
33
33
34 if os.name == 'nt':
34 if os.name == 'nt':
35 from . import scmwindows as scmplatform
35 from . import scmwindows as scmplatform
36 else:
36 else:
37 from . import scmposix as scmplatform
37 from . import scmposix as scmplatform
38
38
39 systemrcpath = scmplatform.systemrcpath
39 systemrcpath = scmplatform.systemrcpath
40 userrcpath = scmplatform.userrcpath
40 userrcpath = scmplatform.userrcpath
41
41
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    # no per-instance __dict__: all state lives in the tuple itself
    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
94
94
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # sorted(d.items()) rather than d.iteritems(): identical result on
    # Python 2 and also valid on Python 3, where iteritems() was removed.
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
119
119
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
140
140
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not usable as a new label (bookmark/branch/tag).'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the label parsed cleanly as an integer, which would be ambiguous
        # with revision numbers
        raise error.Abort(_("cannot use an integer as a name"))
154
154
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\n' in f or '\r' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
159
159
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # portability checking disabled by config
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
171
171
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    parsed = util.parsebool(value)
    # on Windows non-portable names always abort; elsewhere only when asked
    abort = os.name == 'nt' or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    if parsed is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
184
184
class casecollisionauditor(object):
    '''Callable that warns or aborts when a newly added filename would
    case-fold onto an already tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            # already checked in this session; stay quiet
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
208
208
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
232
232
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only a missing file is tolerated; propagate everything else
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind so subsequent opens skip this shim and hit __call__ directly
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        '''Return the full binary content of ``path``.'''
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        '''Return the lines of ``path`` as a list.'''
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        '''Write ``data`` to ``path``, truncating any previous content.'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence ``data`` of lines to ``path``.'''
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        '''Append ``data`` to the end of ``path``.'''
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under the vfs and return (fd, vfs-relative name).'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        '''Rename ``src`` to ``dst`` inside the vfs.

        With ``checkambig``, nudge the destination mtime forward when the
        renamed file's stat is ambiguous against the file it replaced.
        '''
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if not (oldstat and oldstat.stat):
            # destination absent or ambiguity checking disabled: plain rename
            return util.rename(self.join(src), dstpath)
        ret = util.rename(self.join(src), dstpath)
        newstat = util.filestat(dstpath)
        if newstat.isambig(oldstat):
            # stat of renamed file is ambiguous to original one
            advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
            os.utime(dstpath, (advanced, advanced))
        return ret

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort('can only have 1 active background file closer')

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always detach the closer, even if the body raised
                vfs._backgroundfilecloser = None
472
472
473 class vfs(abstractvfs):
473 class vfs(abstractvfs):
474 '''Operate files relative to a base directory
474 '''Operate files relative to a base directory
475
475
476 This class is used to hide the details of COW semantics and
476 This class is used to hide the details of COW semantics and
477 remote file access from higher level code.
477 remote file access from higher level code.
478 '''
478 '''
479 def __init__(self, base, audit=True, expandpath=False, realpath=False):
479 def __init__(self, base, audit=True, expandpath=False, realpath=False):
480 if expandpath:
480 if expandpath:
481 base = util.expandpath(base)
481 base = util.expandpath(base)
482 if realpath:
482 if realpath:
483 base = os.path.realpath(base)
483 base = os.path.realpath(base)
484 self.base = base
484 self.base = base
485 self.mustaudit = audit
485 self.mustaudit = audit
486 self.createmode = None
486 self.createmode = None
487 self._trustnlink = None
487 self._trustnlink = None
488
488
489 @property
489 @property
490 def mustaudit(self):
490 def mustaudit(self):
491 return self._audit
491 return self._audit
492
492
493 @mustaudit.setter
493 @mustaudit.setter
494 def mustaudit(self, onoff):
494 def mustaudit(self, onoff):
495 self._audit = onoff
495 self._audit = onoff
496 if onoff:
496 if onoff:
497 self.audit = pathutil.pathauditor(self.base)
497 self.audit = pathutil.pathauditor(self.base)
498 else:
498 else:
499 self.audit = util.always
499 self.audit = util.always
500
500
501 @util.propertycache
501 @util.propertycache
502 def _cansymlink(self):
502 def _cansymlink(self):
503 return util.checklink(self.base)
503 return util.checklink(self.base)
504
504
505 @util.propertycache
505 @util.propertycache
506 def _chmod(self):
506 def _chmod(self):
507 return util.checkexec(self.base)
507 return util.checkexec(self.base)
508
508
509 def _fixfilemode(self, name):
509 def _fixfilemode(self, name):
510 if self.createmode is None or not self._chmod:
510 if self.createmode is None or not self._chmod:
511 return
511 return
512 os.chmod(name, self.createmode & 0o666)
512 os.chmod(name, self.createmode & 0o666)
513
513
    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` is passed to atomictempfile (valid only for writing).
        '''
        if self._audit:
            # reject OS-unsafe filenames before touching the filesystem
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink semantics: -1 = unknown, 0 = new file (fix mode after open),
        # >1 = possibly hardlinked, so break the link via mktempcopy below.
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        # truncating write: remove first so a hardlinked
                        # original is never clobbered in place
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks by replacing the file with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # freshly created file: apply the configured creation mode
            self._fixfilemode(f)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort('backgroundclose can only be used when a '
                                  'backgroundclosing context manager is active')

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp
590
590
591 def symlink(self, src, dst):
591 def symlink(self, src, dst):
592 self.audit(dst)
592 self.audit(dst)
593 linkname = self.join(dst)
593 linkname = self.join(dst)
594 try:
594 try:
595 os.unlink(linkname)
595 os.unlink(linkname)
596 except OSError:
596 except OSError:
597 pass
597 pass
598
598
599 util.makedirs(os.path.dirname(linkname), self.createmode)
599 util.makedirs(os.path.dirname(linkname), self.createmode)
600
600
601 if self._cansymlink:
601 if self._cansymlink:
602 try:
602 try:
603 os.symlink(src, linkname)
603 os.symlink(src, linkname)
604 except OSError as err:
604 except OSError as err:
605 raise OSError(err.errno, _('could not symlink to %r: %s') %
605 raise OSError(err.errno, _('could not symlink to %r: %s') %
606 (src, err.strerror), linkname)
606 (src, err.strerror), linkname)
607 else:
607 else:
608 self.write(dst, src)
608 self.write(dst, src)
609
609
610 def join(self, path, *insidef):
610 def join(self, path, *insidef):
611 if path:
611 if path:
612 return os.path.join(self.base, path, *insidef)
612 return os.path.join(self.base, path, *insidef)
613 else:
613 else:
614 return self.base
614 return self.base
615
615
# Backward-compatible alias: historical callers refer to the vfs class
# as "opener".
opener = vfs
617
617
class auditvfs(object):
    """Base wrapper holding another vfs and proxying its audit flag."""

    def __init__(self, vfs):
        # keep the wrapped vfs; subclasses delegate operations to it
        self.vfs = vfs

    @property
    def mustaudit(self):
        """Proxy the wrapped vfs's audit flag."""
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff
629
629
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before delegating
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
645
645
# Backward-compatible alias mirroring the vfs/opener naming pair.
filteropener = filtervfs
647
647
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort('this vfs is read only')

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
661
661
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only a failure on the top-level path is fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return False if already seen
            dirstat = os.stat(dirname)
            for seenstat in dirlst:
                if samestat(dirstat, seenstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)

    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target separately, sharing seen_dirs
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
709
709
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    # bundled default.d/*.rc files come first, then system and user rcs
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
722
722
# Cache for rcpath(); populated lazily on first call.
_rcpath = None
724
724
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            # directory entry: pick up every *.rc file inside it
            _rcpath.extend(os.path.join(p, f)
                           for f, kind in osutil.listdir(p)
                           if f.endswith('.rc'))
        else:
            _rcpath.append(p)
    return _rcpath
748
748
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None denotes the working directory; map it to the wdir pseudo-rev
    return wdirrev if rev is None else rev
755
755
def revsingle(repo, revspec, default='.'):
    """Return the context of the single revision selected by ``revspec``.

    Falls back to ``default`` for an empty spec (but not for rev 0);
    aborts when the spec matches nothing.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
764
764
def _pairspec(revspec):
    """Report whether ``revspec`` parses to a top-level range expression."""
    tree = revset.optimize(revset.parse(revspec))  # fix up "x^:y" -> "(x^):y"
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
769
769
def revpair(repo, revs):
    """Return a pair of nodes bounding the revisions in ``revs``.

    The second element is None when the selection amounts to a single
    revision that was not written as a range expression.  With no specs
    at all, the working directory's first parent is returned.
    """
    if not revs:
        return repo.dirstate.p1(), None

    selection = revrange(repo, revs)

    if not selection:
        first = second = None
    elif selection.isascending():
        first, second = selection.min(), selection.max()
    elif selection.isdescending():
        first, second = selection.max(), selection.min()
    else:
        first, second = selection.first(), selection.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
799
799
def revrange(repo, revs):
    """Return a set of revisions matching a list of revision specifications.

    Integer specs are wrapped as ``rev(N)`` expressions; the combined
    specs are evaluated against ``repo``.
    """
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in revs]
    matcher = revset.matchany(repo.ui, allspecs, repo)
    return matcher(repo)
809
809
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    # a sole parent immediately preceding this revision is implied
    return [] if parents[0].rev() >= intrev(ctx.rev()) - 1 else parents
825
825
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match: keep the original pattern verbatim
            ret.append(kindpat)
    return ret
844
844
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    if badfn is None:
        # default handler: warn on the repo's ui (closes over ``m`` below)
        def badfn(f, msg):
            ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    return (m, []) if m.always() else (m, pats)
869
869
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    matcher, _unused_pats = matchandpats(ctx, pats, opts, globbed, default,
                                         badfn=badfn)
    return matcher
874
874
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
878
878
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
882
882
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backupdir = ui.config('ui', 'origbackuppath', None)
    if backupdir is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured backup dir
    relpath = os.path.relpath(filepath, start=repo.root)
    fullpath = repo.wjoin(backupdir, relpath)

    parent = repo.vfs.dirname(fullpath)
    if not repo.vfs.exists(parent):
        ui.note(_('creating directory: %s\n') % parent)
        util.makedirs(parent)

    return fullpath + ".orig"
902
902
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing ones for removal,
    recording renames detected by similarity.

    ``matcher`` selects the files considered; ``prefix`` is prepended to
    paths in user-facing messages (used when recursing into subrepos).
    Returns 1 when any explicitly named file was rejected or a subrepo
    reported failure, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # A subrepo is affected when it is named exactly or when a matched
        # file lives inside it.  Compare against ``subpath + '/'`` so a
        # sibling path sharing a prefix (e.g. file "subx" vs subrepo
        # "sub") is not treated as a match.
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath + '/'):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only explicitly listed files are reported as bad
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly named file that failed trumps any other result
    for f in rejected:
        if f in m.files():
            return 1
    return ret
966
966
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        for abs in sorted(unknownset | set(deleted)):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # 1 signals that an explicitly named file could not be processed
    for f in rejected:
        if f in m.files():
            return 1
    return 0
995
995
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked (or unauditable-unknown) but gone from disk
            deleted.append(abs)
        elif dstate == 'r':
            # removed in dirstate: still on disk -> forgotten (rename
            # candidate source), really gone -> removed
            (forgotten if st else removed).append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1024
1024
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1039
1039
1040 def _markchanges(repo, unknown, deleted, renames):
1040 def _markchanges(repo, unknown, deleted, renames):
1041 '''Marks the files in unknown as added, the files in deleted as removed,
1041 '''Marks the files in unknown as added, the files in deleted as removed,
1042 and the files in renames as copied.'''
1042 and the files in renames as copied.'''
1043 wctx = repo[None]
1043 wctx = repo[None]
1044 with repo.wlock():
1044 with repo.wlock():
1045 wctx.forget(deleted)
1045 wctx.forget(deleted)
1046 wctx.add(unknown)
1046 wctx.add(unknown)
1047 for new, old in renames.iteritems():
1047 for new, old in renames.iteritems():
1048 wctx.copy(old, new)
1048 wctx.copy(old, new)
1049
1049
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    dirstate = repo.dirstate
    origsrc = dirstate.copied(src) or src
    if dst == origsrc:
        # copying back a copy: only make sure dst is tracked normally
        if dirstate[dst] not in 'mn' and not dryrun:
            dirstate.normallookup(dst)
        return
    if dirstate[origsrc] == 'a' and origsrc == src:
        # the source is only added in the working directory, so there is
        # no committed revision to record copy data from; at most add dst
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1068
1068
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty or non-alphanumeric-leading entry means the file itself
        # is damaged, not merely that we lack a feature
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1087
1087
def writerequires(opener, requirements):
    """Write the requirements, one per line in sorted order, to .hg/requires."""
    with opener('requires', 'w') as fp:
        for name in sorted(requirements):
            fp.write("%s\n" % name)
1092
1092
class filecachesubentry(object):
    """Stat-based change tracking for a single file path.

    ``_cacheable`` is tri-state: True/False once determined, None while
    still unknown (e.g. the file did not exist when first statted).
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # still unknown: could not stat the file
                self._cacheable = None

    def refresh(self):
        """Re-stat the file, but only if stat data can be trusted for it."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat info is reliable for this path; optimistic if unknown."""
        if self._cacheable is None:
            # we don't know yet, assume it is for now
            return True
        return self._cacheable

    def changed(self):
        """Return True if the file appears to have changed since last stat."""
        # if stat data can't be trusted, always report a change
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # cacheability may only be determinable now that the file exists
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # re-test: the answer above may have been False
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        return False

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if the file is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1147
1147
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits on the first changed entry, like the
        # explicit loop it replaces
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1164
1164
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # fast path: a value cached on the instance implies a matching
        # _filecache entry (the invariant maintained by __set__/__get__)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)
        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            joined = [self.join(obj, p) for p in self.paths]
            # stat -before- creating the object so our cache doesn't lie
            # if a writer modified between the time we read and stat
            entry = filecacheentry(joined, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name in obj._filecache:
            ce = obj._filecache[self.name]
        else:
            # create an entry for the missing value because X in __dict__
            # must imply X in _filecache
            joined = [self.join(obj, p) for p in self.paths]
            ce = filecacheentry(joined, False)
            obj._filecache[self.name] = ce

        ce.obj = value  # update cached copy
        obj.__dict__[self.name] = value  # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1240
1240
1241 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1241 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1242 if lock is None:
1242 if lock is None:
1243 raise error.LockInheritanceContractViolation(
1243 raise error.LockInheritanceContractViolation(
1244 'lock can only be inherited while held')
1244 'lock can only be inherited while held')
1245 if environ is None:
1245 if environ is None:
1246 environ = {}
1246 environ = {}
1247 with lock.inherit() as locker:
1247 with lock.inherit() as locker:
1248 environ[envvar] = locker
1248 environ[envvar] = locker
1249 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1249 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1250
1250
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # delegate to the generic lock-inheriting runner with the wlock's
    # well-known environment variable
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1259
1259
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1266
1266
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    value = ui.configbool('format', 'generaldelta', False)
    return value
1272
1272
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        # set via object.__setattr__ so these land on the proxy itself
        # instead of being forwarded to the wrapped file by __setattr__
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, name):
        # everything we don't define is delegated to the real file
        return getattr(self._origfh, name)

    def __setattr__(self, name, value):
        return setattr(self._origfh, name, value)

    def __delattr__(self, name):
        return delattr(self._origfh, name)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # hand the underlying file to the closer rather than closing inline
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1300
1300
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads.

    Intended to be used as a context manager: close() may only be called
    while the manager is active.  If background closing is disabled (by
    config or because too few files are expected), close() degrades to a
    synchronous fh.close().
    """
    def __init__(self, ui, expectedcount=-1):
        # _running: worker threads should keep polling the queue
        self._running = False
        # _entered: inside the context manager; gate for close()
        self._entered = False
        self._threads = []
        # last exception raised by a worker, re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        # util.queue is presumably the stdlib Queue compatibility alias —
        # a bounded queue so producers block instead of hoarding open fds
        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        # NOTE(review): an exception stashed by a worker after the last
        # close() call is silently dropped here — confirm that is intended.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so the loop can notice _running going False
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort('can only call close() when context manager '
                              'active')

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1391
General Comments 0
You need to be logged in to leave comments. Login now