##// END OF EJS Templates
scmutil: support background closing for write()...
Gregory Szorc -
r28197:2ada6238 default
parent child Browse files
Show More
@@ -1,1379 +1,1379
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import Queue
10 import Queue
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import glob
13 import glob
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
35 if os.name == 'nt':
35 if os.name == 'nt':
36 from . import scmwindows as scmplatform
36 from . import scmwindows as scmplatform
37 else:
37 else:
38 from . import scmposix as scmplatform
38 from . import scmposix as scmplatform
39
39
40 systemrcpath = scmplatform.systemrcpath
40 systemrcpath = scmplatform.systemrcpath
41 userrcpath = scmplatform.userrcpath
41 userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Store the seven lists positionally; the properties below map
        # each index back to its name.
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # dict.iteritems() does not exist on Python 3; items() produces the
    # same pairs on both major versions (sorted() materializes the view
    # either way, so there is no behavior or performance difference here).
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for node in (excluded or []):
        if node not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[node]
        # Count changesets that are secret (or beyond) and still alive.
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(node)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141
141
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    # A label that parses as an integer would be ambiguous with revision
    # numbers; reject it. ValueError means the name is fine.
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # Config says to ignore non-portable names entirely.
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts on non-portable names; elsewhere only if the
    # user explicitly asked for 'abort'.
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
185
185
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        # Lower-case every tracked filename in one encoding.lower() call
        # by joining on NUL (which cannot appear in a filename).
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        # Nothing filtered at or below maxrev; same answer as no filtering.
        return None
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return ""
            raise

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return []
            raise

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # Rebind so subsequent open() calls go straight to __call__.
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        '''Return the full contents of ``path`` as bytes.'''
        with self(path, 'rb') as fh:
            return fh.read()

    def readlines(self, path, mode='rb'):
        '''Return the contents of ``path`` as a list of lines.'''
        with self(path, mode=mode) as fh:
            return fh.readlines()

    def write(self, path, data, backgroundclose=False):
        '''Write ``data`` to ``path``, optionally closing on a
        background thread (see ``backgroundclosing()``).'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fh:
            return fh.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write a sequence of lines to ``path``.'''
        with self(path, mode=mode, notindexed=notindexed) as fh:
            return fh.writelines(data)

    def append(self, path, data):
        '''Append ``data`` to ``path``.'''
        with self(path, 'ab') as fh:
            return fh.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under the vfs root; return (fd, vfs-relative
        name).'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            # Report the name relative to the requested subdirectory.
            return fd, os.path.join(dir, fname)
        return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        onerror = None
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort('can only have 1 active background file closer')

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # Always clear the reference, even if the caller raised.
                vfs._backgroundfilecloser = None
463
463
464 class vfs(abstractvfs):
464 class vfs(abstractvfs):
465 '''Operate files relative to a base directory
465 '''Operate files relative to a base directory
466
466
467 This class is used to hide the details of COW semantics and
467 This class is used to hide the details of COW semantics and
468 remote file access from higher level code.
468 remote file access from higher level code.
469 '''
469 '''
470 def __init__(self, base, audit=True, expandpath=False, realpath=False):
470 def __init__(self, base, audit=True, expandpath=False, realpath=False):
471 if expandpath:
471 if expandpath:
472 base = util.expandpath(base)
472 base = util.expandpath(base)
473 if realpath:
473 if realpath:
474 base = os.path.realpath(base)
474 base = os.path.realpath(base)
475 self.base = base
475 self.base = base
476 self.mustaudit = audit
476 self.mustaudit = audit
477 self.createmode = None
477 self.createmode = None
478 self._trustnlink = None
478 self._trustnlink = None
479
479
480 @property
480 @property
481 def mustaudit(self):
481 def mustaudit(self):
482 return self._audit
482 return self._audit
483
483
484 @mustaudit.setter
484 @mustaudit.setter
485 def mustaudit(self, onoff):
485 def mustaudit(self, onoff):
486 self._audit = onoff
486 self._audit = onoff
487 if onoff:
487 if onoff:
488 self.audit = pathutil.pathauditor(self.base)
488 self.audit = pathutil.pathauditor(self.base)
489 else:
489 else:
490 self.audit = util.always
490 self.audit = util.always
491
491
492 @util.propertycache
492 @util.propertycache
493 def _cansymlink(self):
493 def _cansymlink(self):
494 return util.checklink(self.base)
494 return util.checklink(self.base)
495
495
496 @util.propertycache
496 @util.propertycache
497 def _chmod(self):
497 def _chmod(self):
498 return util.checkexec(self.base)
498 return util.checkexec(self.base)
499
499
500 def _fixfilemode(self, name):
500 def _fixfilemode(self, name):
501 if self.createmode is None or not self._chmod:
501 if self.createmode is None or not self._chmod:
502 return
502 return
503 os.chmod(name, self.createmode & 0o666)
503 os.chmod(name, self.createmode & 0o666)
504
504
505 def __call__(self, path, mode="r", text=False, atomictemp=False,
505 def __call__(self, path, mode="r", text=False, atomictemp=False,
506 notindexed=False, backgroundclose=False):
506 notindexed=False, backgroundclose=False):
507 '''Open ``path`` file, which is relative to vfs root.
507 '''Open ``path`` file, which is relative to vfs root.
508
508
509 Newly created directories are marked as "not to be indexed by
509 Newly created directories are marked as "not to be indexed by
510 the content indexing service", if ``notindexed`` is specified
510 the content indexing service", if ``notindexed`` is specified
511 for "write" mode access.
511 for "write" mode access.
512
512
513 If ``backgroundclose`` is passed, the file may be closed asynchronously.
513 If ``backgroundclose`` is passed, the file may be closed asynchronously.
514 It can only be used if the ``self.backgroundclosing()`` context manager
514 It can only be used if the ``self.backgroundclosing()`` context manager
515 is active. This should only be specified if the following criteria hold:
515 is active. This should only be specified if the following criteria hold:
516
516
517 1. There is a potential for writing thousands of files. Unless you
517 1. There is a potential for writing thousands of files. Unless you
518 are writing thousands of files, the performance benefits of
518 are writing thousands of files, the performance benefits of
519 asynchronously closing files is not realized.
519 asynchronously closing files is not realized.
520 2. Files are opened exactly once for the ``backgroundclosing``
520 2. Files are opened exactly once for the ``backgroundclosing``
521 active duration and are therefore free of race conditions between
521 active duration and are therefore free of race conditions between
522 closing a file on a background thread and reopening it. (If the
522 closing a file on a background thread and reopening it. (If the
523 file were opened multiple times, there could be unflushed data
523 file were opened multiple times, there could be unflushed data
524 because the original file handle hasn't been flushed/closed yet.)
524 because the original file handle hasn't been flushed/closed yet.)
525 '''
525 '''
526 if self._audit:
526 if self._audit:
527 r = util.checkosfilename(path)
527 r = util.checkosfilename(path)
528 if r:
528 if r:
529 raise error.Abort("%s: %r" % (r, path))
529 raise error.Abort("%s: %r" % (r, path))
530 self.audit(path)
530 self.audit(path)
531 f = self.join(path)
531 f = self.join(path)
532
532
533 if not text and "b" not in mode:
533 if not text and "b" not in mode:
534 mode += "b" # for that other OS
534 mode += "b" # for that other OS
535
535
536 nlink = -1
536 nlink = -1
537 if mode not in ('r', 'rb'):
537 if mode not in ('r', 'rb'):
538 dirname, basename = util.split(f)
538 dirname, basename = util.split(f)
539 # If basename is empty, then the path is malformed because it points
539 # If basename is empty, then the path is malformed because it points
540 # to a directory. Let the posixfile() call below raise IOError.
540 # to a directory. Let the posixfile() call below raise IOError.
541 if basename:
541 if basename:
542 if atomictemp:
542 if atomictemp:
543 util.ensuredirs(dirname, self.createmode, notindexed)
543 util.ensuredirs(dirname, self.createmode, notindexed)
544 return util.atomictempfile(f, mode, self.createmode)
544 return util.atomictempfile(f, mode, self.createmode)
545 try:
545 try:
546 if 'w' in mode:
546 if 'w' in mode:
547 util.unlink(f)
547 util.unlink(f)
548 nlink = 0
548 nlink = 0
549 else:
549 else:
550 # nlinks() may behave differently for files on Windows
550 # nlinks() may behave differently for files on Windows
551 # shares if the file is open.
551 # shares if the file is open.
552 with util.posixfile(f):
552 with util.posixfile(f):
553 nlink = util.nlinks(f)
553 nlink = util.nlinks(f)
554 if nlink < 1:
554 if nlink < 1:
555 nlink = 2 # force mktempcopy (issue1922)
555 nlink = 2 # force mktempcopy (issue1922)
556 except (OSError, IOError) as e:
556 except (OSError, IOError) as e:
557 if e.errno != errno.ENOENT:
557 if e.errno != errno.ENOENT:
558 raise
558 raise
559 nlink = 0
559 nlink = 0
560 util.ensuredirs(dirname, self.createmode, notindexed)
560 util.ensuredirs(dirname, self.createmode, notindexed)
561 if nlink > 0:
561 if nlink > 0:
562 if self._trustnlink is None:
562 if self._trustnlink is None:
563 self._trustnlink = nlink > 1 or util.checknlink(f)
563 self._trustnlink = nlink > 1 or util.checknlink(f)
564 if nlink > 1 or not self._trustnlink:
564 if nlink > 1 or not self._trustnlink:
565 util.rename(util.mktempcopy(f), f)
565 util.rename(util.mktempcopy(f), f)
566 fp = util.posixfile(f, mode)
566 fp = util.posixfile(f, mode)
567 if nlink == 0:
567 if nlink == 0:
568 self._fixfilemode(f)
568 self._fixfilemode(f)
569
569
570 if backgroundclose:
570 if backgroundclose:
571 if not self._backgroundfilecloser:
571 if not self._backgroundfilecloser:
572 raise error.Abort('backgroundclose can only be used when a '
572 raise error.Abort('backgroundclose can only be used when a '
573 'backgroundclosing context manager is active')
573 'backgroundclosing context manager is active')
574
574
575 fp = delayclosedfile(fp, self._backgroundfilecloser)
575 fp = delayclosedfile(fp, self._backgroundfilecloser)
576
576
577 return fp
577 return fp
578
578
579 def symlink(self, src, dst):
579 def symlink(self, src, dst):
580 self.audit(dst)
580 self.audit(dst)
581 linkname = self.join(dst)
581 linkname = self.join(dst)
582 try:
582 try:
583 os.unlink(linkname)
583 os.unlink(linkname)
584 except OSError:
584 except OSError:
585 pass
585 pass
586
586
587 util.ensuredirs(os.path.dirname(linkname), self.createmode)
587 util.ensuredirs(os.path.dirname(linkname), self.createmode)
588
588
589 if self._cansymlink:
589 if self._cansymlink:
590 try:
590 try:
591 os.symlink(src, linkname)
591 os.symlink(src, linkname)
592 except OSError as err:
592 except OSError as err:
593 raise OSError(err.errno, _('could not symlink to %r: %s') %
593 raise OSError(err.errno, _('could not symlink to %r: %s') %
594 (src, err.strerror), linkname)
594 (src, err.strerror), linkname)
595 else:
595 else:
596 self.write(dst, src)
596 self.write(dst, src)
597
597
598 def join(self, path, *insidef):
598 def join(self, path, *insidef):
599 if path:
599 if path:
600 return os.path.join(self.base, path, *insidef)
600 return os.path.join(self.base, path, *insidef)
601 else:
601 else:
602 return self.base
602 return self.base
603
603
604 opener = vfs
604 opener = vfs
605
605
606 class auditvfs(object):
606 class auditvfs(object):
607 def __init__(self, vfs):
607 def __init__(self, vfs):
608 self.vfs = vfs
608 self.vfs = vfs
609
609
610 @property
610 @property
611 def mustaudit(self):
611 def mustaudit(self):
612 return self.vfs.mustaudit
612 return self.vfs.mustaudit
613
613
614 @mustaudit.setter
614 @mustaudit.setter
615 def mustaudit(self, onoff):
615 def mustaudit(self, onoff):
616 self.vfs.mustaudit = onoff
616 self.vfs.mustaudit = onoff
617
617
618 class filtervfs(abstractvfs, auditvfs):
618 class filtervfs(abstractvfs, auditvfs):
619 '''Wrapper vfs for filtering filenames with a function.'''
619 '''Wrapper vfs for filtering filenames with a function.'''
620
620
621 def __init__(self, vfs, filter):
621 def __init__(self, vfs, filter):
622 auditvfs.__init__(self, vfs)
622 auditvfs.__init__(self, vfs)
623 self._filter = filter
623 self._filter = filter
624
624
625 def __call__(self, path, *args, **kwargs):
625 def __call__(self, path, *args, **kwargs):
626 return self.vfs(self._filter(path), *args, **kwargs)
626 return self.vfs(self._filter(path), *args, **kwargs)
627
627
628 def join(self, path, *insidef):
628 def join(self, path, *insidef):
629 if path:
629 if path:
630 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
630 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
631 else:
631 else:
632 return self.vfs.join(path)
632 return self.vfs.join(path)
633
633
634 filteropener = filtervfs
634 filteropener = filtervfs
635
635
636 class readonlyvfs(abstractvfs, auditvfs):
636 class readonlyvfs(abstractvfs, auditvfs):
637 '''Wrapper vfs preventing any writing.'''
637 '''Wrapper vfs preventing any writing.'''
638
638
639 def __init__(self, vfs):
639 def __init__(self, vfs):
640 auditvfs.__init__(self, vfs)
640 auditvfs.__init__(self, vfs)
641
641
642 def __call__(self, path, mode='r', *args, **kw):
642 def __call__(self, path, mode='r', *args, **kw):
643 if mode not in ('r', 'rb'):
643 if mode not in ('r', 'rb'):
644 raise error.Abort('this vfs is read only')
644 raise error.Abort('this vfs is read only')
645 return self.vfs(path, mode, *args, **kw)
645 return self.vfs(path, mode, *args, **kw)
646
646
647 def join(self, path, *insidef):
647 def join(self, path, *insidef):
648 return self.vfs.join(path, *insidef)
648 return self.vfs.join(path, *insidef)
649
649
650 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
650 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
651 '''yield every hg repository under path, always recursively.
651 '''yield every hg repository under path, always recursively.
652 The recurse flag will only control recursion into repo working dirs'''
652 The recurse flag will only control recursion into repo working dirs'''
653 def errhandler(err):
653 def errhandler(err):
654 if err.filename == path:
654 if err.filename == path:
655 raise err
655 raise err
656 samestat = getattr(os.path, 'samestat', None)
656 samestat = getattr(os.path, 'samestat', None)
657 if followsym and samestat is not None:
657 if followsym and samestat is not None:
658 def adddir(dirlst, dirname):
658 def adddir(dirlst, dirname):
659 match = False
659 match = False
660 dirstat = os.stat(dirname)
660 dirstat = os.stat(dirname)
661 for lstdirstat in dirlst:
661 for lstdirstat in dirlst:
662 if samestat(dirstat, lstdirstat):
662 if samestat(dirstat, lstdirstat):
663 match = True
663 match = True
664 break
664 break
665 if not match:
665 if not match:
666 dirlst.append(dirstat)
666 dirlst.append(dirstat)
667 return not match
667 return not match
668 else:
668 else:
669 followsym = False
669 followsym = False
670
670
671 if (seen_dirs is None) and followsym:
671 if (seen_dirs is None) and followsym:
672 seen_dirs = []
672 seen_dirs = []
673 adddir(seen_dirs, path)
673 adddir(seen_dirs, path)
674 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
674 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
675 dirs.sort()
675 dirs.sort()
676 if '.hg' in dirs:
676 if '.hg' in dirs:
677 yield root # found a repository
677 yield root # found a repository
678 qroot = os.path.join(root, '.hg', 'patches')
678 qroot = os.path.join(root, '.hg', 'patches')
679 if os.path.isdir(os.path.join(qroot, '.hg')):
679 if os.path.isdir(os.path.join(qroot, '.hg')):
680 yield qroot # we have a patch queue repo here
680 yield qroot # we have a patch queue repo here
681 if recurse:
681 if recurse:
682 # avoid recursing inside the .hg directory
682 # avoid recursing inside the .hg directory
683 dirs.remove('.hg')
683 dirs.remove('.hg')
684 else:
684 else:
685 dirs[:] = [] # don't descend further
685 dirs[:] = [] # don't descend further
686 elif followsym:
686 elif followsym:
687 newdirs = []
687 newdirs = []
688 for d in dirs:
688 for d in dirs:
689 fname = os.path.join(root, d)
689 fname = os.path.join(root, d)
690 if adddir(seen_dirs, fname):
690 if adddir(seen_dirs, fname):
691 if os.path.islink(fname):
691 if os.path.islink(fname):
692 for hgname in walkrepos(fname, True, seen_dirs):
692 for hgname in walkrepos(fname, True, seen_dirs):
693 yield hgname
693 yield hgname
694 else:
694 else:
695 newdirs.append(d)
695 newdirs.append(d)
696 dirs[:] = newdirs
696 dirs[:] = newdirs
697
697
698 def osrcpath():
698 def osrcpath():
699 '''return default os-specific hgrc search path'''
699 '''return default os-specific hgrc search path'''
700 path = []
700 path = []
701 defaultpath = os.path.join(util.datapath, 'default.d')
701 defaultpath = os.path.join(util.datapath, 'default.d')
702 if os.path.isdir(defaultpath):
702 if os.path.isdir(defaultpath):
703 for f, kind in osutil.listdir(defaultpath):
703 for f, kind in osutil.listdir(defaultpath):
704 if f.endswith('.rc'):
704 if f.endswith('.rc'):
705 path.append(os.path.join(defaultpath, f))
705 path.append(os.path.join(defaultpath, f))
706 path.extend(systemrcpath())
706 path.extend(systemrcpath())
707 path.extend(userrcpath())
707 path.extend(userrcpath())
708 path = [os.path.normpath(f) for f in path]
708 path = [os.path.normpath(f) for f in path]
709 return path
709 return path
710
710
711 _rcpath = None
711 _rcpath = None
712
712
713 def rcpath():
713 def rcpath():
714 '''return hgrc search path. if env var HGRCPATH is set, use it.
714 '''return hgrc search path. if env var HGRCPATH is set, use it.
715 for each item in path, if directory, use files ending in .rc,
715 for each item in path, if directory, use files ending in .rc,
716 else use item.
716 else use item.
717 make HGRCPATH empty to only look in .hg/hgrc of current repo.
717 make HGRCPATH empty to only look in .hg/hgrc of current repo.
718 if no HGRCPATH, use default os-specific path.'''
718 if no HGRCPATH, use default os-specific path.'''
719 global _rcpath
719 global _rcpath
720 if _rcpath is None:
720 if _rcpath is None:
721 if 'HGRCPATH' in os.environ:
721 if 'HGRCPATH' in os.environ:
722 _rcpath = []
722 _rcpath = []
723 for p in os.environ['HGRCPATH'].split(os.pathsep):
723 for p in os.environ['HGRCPATH'].split(os.pathsep):
724 if not p:
724 if not p:
725 continue
725 continue
726 p = util.expandpath(p)
726 p = util.expandpath(p)
727 if os.path.isdir(p):
727 if os.path.isdir(p):
728 for f, kind in osutil.listdir(p):
728 for f, kind in osutil.listdir(p):
729 if f.endswith('.rc'):
729 if f.endswith('.rc'):
730 _rcpath.append(os.path.join(p, f))
730 _rcpath.append(os.path.join(p, f))
731 else:
731 else:
732 _rcpath.append(p)
732 _rcpath.append(p)
733 else:
733 else:
734 _rcpath = osrcpath()
734 _rcpath = osrcpath()
735 return _rcpath
735 return _rcpath
736
736
737 def intrev(rev):
737 def intrev(rev):
738 """Return integer for a given revision that can be used in comparison or
738 """Return integer for a given revision that can be used in comparison or
739 arithmetic operation"""
739 arithmetic operation"""
740 if rev is None:
740 if rev is None:
741 return wdirrev
741 return wdirrev
742 return rev
742 return rev
743
743
744 def revsingle(repo, revspec, default='.'):
744 def revsingle(repo, revspec, default='.'):
745 if not revspec and revspec != 0:
745 if not revspec and revspec != 0:
746 return repo[default]
746 return repo[default]
747
747
748 l = revrange(repo, [revspec])
748 l = revrange(repo, [revspec])
749 if not l:
749 if not l:
750 raise error.Abort(_('empty revision set'))
750 raise error.Abort(_('empty revision set'))
751 return repo[l.last()]
751 return repo[l.last()]
752
752
753 def _pairspec(revspec):
753 def _pairspec(revspec):
754 tree = revset.parse(revspec)
754 tree = revset.parse(revspec)
755 tree = revset.optimize(tree, True)[1] # fix up "x^:y" -> "(x^):y"
755 tree = revset.optimize(tree, True)[1] # fix up "x^:y" -> "(x^):y"
756 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
756 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
757
757
758 def revpair(repo, revs):
758 def revpair(repo, revs):
759 if not revs:
759 if not revs:
760 return repo.dirstate.p1(), None
760 return repo.dirstate.p1(), None
761
761
762 l = revrange(repo, revs)
762 l = revrange(repo, revs)
763
763
764 if not l:
764 if not l:
765 first = second = None
765 first = second = None
766 elif l.isascending():
766 elif l.isascending():
767 first = l.min()
767 first = l.min()
768 second = l.max()
768 second = l.max()
769 elif l.isdescending():
769 elif l.isdescending():
770 first = l.max()
770 first = l.max()
771 second = l.min()
771 second = l.min()
772 else:
772 else:
773 first = l.first()
773 first = l.first()
774 second = l.last()
774 second = l.last()
775
775
776 if first is None:
776 if first is None:
777 raise error.Abort(_('empty revision range'))
777 raise error.Abort(_('empty revision range'))
778 if (first == second and len(revs) >= 2
778 if (first == second and len(revs) >= 2
779 and not all(revrange(repo, [r]) for r in revs)):
779 and not all(revrange(repo, [r]) for r in revs)):
780 raise error.Abort(_('empty revision on one side of range'))
780 raise error.Abort(_('empty revision on one side of range'))
781
781
782 # if top-level is range expression, the result must always be a pair
782 # if top-level is range expression, the result must always be a pair
783 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
783 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
784 return repo.lookup(first), None
784 return repo.lookup(first), None
785
785
786 return repo.lookup(first), repo.lookup(second)
786 return repo.lookup(first), repo.lookup(second)
787
787
788 def revrange(repo, revs):
788 def revrange(repo, revs):
789 """Yield revision as strings from a list of revision specifications."""
789 """Yield revision as strings from a list of revision specifications."""
790 allspecs = []
790 allspecs = []
791 for spec in revs:
791 for spec in revs:
792 if isinstance(spec, int):
792 if isinstance(spec, int):
793 spec = revset.formatspec('rev(%d)', spec)
793 spec = revset.formatspec('rev(%d)', spec)
794 allspecs.append(spec)
794 allspecs.append(spec)
795 m = revset.matchany(repo.ui, allspecs, repo)
795 m = revset.matchany(repo.ui, allspecs, repo)
796 return m(repo)
796 return m(repo)
797
797
798 def meaningfulparents(repo, ctx):
798 def meaningfulparents(repo, ctx):
799 """Return list of meaningful (or all if debug) parentrevs for rev.
799 """Return list of meaningful (or all if debug) parentrevs for rev.
800
800
801 For merges (two non-nullrev revisions) both parents are meaningful.
801 For merges (two non-nullrev revisions) both parents are meaningful.
802 Otherwise the first parent revision is considered meaningful if it
802 Otherwise the first parent revision is considered meaningful if it
803 is not the preceding revision.
803 is not the preceding revision.
804 """
804 """
805 parents = ctx.parents()
805 parents = ctx.parents()
806 if len(parents) > 1:
806 if len(parents) > 1:
807 return parents
807 return parents
808 if repo.ui.debugflag:
808 if repo.ui.debugflag:
809 return [parents[0], repo['null']]
809 return [parents[0], repo['null']]
810 if parents[0].rev() >= intrev(ctx.rev()) - 1:
810 if parents[0].rev() >= intrev(ctx.rev()) - 1:
811 return []
811 return []
812 return parents
812 return parents
813
813
814 def expandpats(pats):
814 def expandpats(pats):
815 '''Expand bare globs when running on windows.
815 '''Expand bare globs when running on windows.
816 On posix we assume it already has already been done by sh.'''
816 On posix we assume it already has already been done by sh.'''
817 if not util.expandglobs:
817 if not util.expandglobs:
818 return list(pats)
818 return list(pats)
819 ret = []
819 ret = []
820 for kindpat in pats:
820 for kindpat in pats:
821 kind, pat = matchmod._patsplit(kindpat, None)
821 kind, pat = matchmod._patsplit(kindpat, None)
822 if kind is None:
822 if kind is None:
823 try:
823 try:
824 globbed = glob.glob(pat)
824 globbed = glob.glob(pat)
825 except re.error:
825 except re.error:
826 globbed = [pat]
826 globbed = [pat]
827 if globbed:
827 if globbed:
828 ret.extend(globbed)
828 ret.extend(globbed)
829 continue
829 continue
830 ret.append(kindpat)
830 ret.append(kindpat)
831 return ret
831 return ret
832
832
833 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
833 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
834 badfn=None):
834 badfn=None):
835 '''Return a matcher and the patterns that were used.
835 '''Return a matcher and the patterns that were used.
836 The matcher will warn about bad matches, unless an alternate badfn callback
836 The matcher will warn about bad matches, unless an alternate badfn callback
837 is provided.'''
837 is provided.'''
838 if pats == ("",):
838 if pats == ("",):
839 pats = []
839 pats = []
840 if opts is None:
840 if opts is None:
841 opts = {}
841 opts = {}
842 if not globbed and default == 'relpath':
842 if not globbed and default == 'relpath':
843 pats = expandpats(pats or [])
843 pats = expandpats(pats or [])
844
844
845 def bad(f, msg):
845 def bad(f, msg):
846 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
846 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
847
847
848 if badfn is None:
848 if badfn is None:
849 badfn = bad
849 badfn = bad
850
850
851 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
851 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
852 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
852 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
853
853
854 if m.always():
854 if m.always():
855 pats = []
855 pats = []
856 return m, pats
856 return m, pats
857
857
858 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
858 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
859 badfn=None):
859 badfn=None):
860 '''Return a matcher that will warn about bad matches.'''
860 '''Return a matcher that will warn about bad matches.'''
861 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
861 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
862
862
863 def matchall(repo):
863 def matchall(repo):
864 '''Return a matcher that will efficiently match everything.'''
864 '''Return a matcher that will efficiently match everything.'''
865 return matchmod.always(repo.root, repo.getcwd())
865 return matchmod.always(repo.root, repo.getcwd())
866
866
867 def matchfiles(repo, files, badfn=None):
867 def matchfiles(repo, files, badfn=None):
868 '''Return a matcher that will efficiently match exactly these files.'''
868 '''Return a matcher that will efficiently match exactly these files.'''
869 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
869 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
870
870
871 def origpath(ui, repo, filepath):
871 def origpath(ui, repo, filepath):
872 '''customize where .orig files are created
872 '''customize where .orig files are created
873
873
874 Fetch user defined path from config file: [ui] origbackuppath = <path>
874 Fetch user defined path from config file: [ui] origbackuppath = <path>
875 Fall back to default (filepath) if not specified
875 Fall back to default (filepath) if not specified
876 '''
876 '''
877 origbackuppath = ui.config('ui', 'origbackuppath', None)
877 origbackuppath = ui.config('ui', 'origbackuppath', None)
878 if origbackuppath is None:
878 if origbackuppath is None:
879 return filepath + ".orig"
879 return filepath + ".orig"
880
880
881 filepathfromroot = os.path.relpath(filepath, start=repo.root)
881 filepathfromroot = os.path.relpath(filepath, start=repo.root)
882 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
882 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
883
883
884 origbackupdir = repo.vfs.dirname(fullorigpath)
884 origbackupdir = repo.vfs.dirname(fullorigpath)
885 if not repo.vfs.exists(origbackupdir):
885 if not repo.vfs.exists(origbackupdir):
886 ui.note(_('creating directory: %s\n') % origbackupdir)
886 ui.note(_('creating directory: %s\n') % origbackupdir)
887 util.makedirs(origbackupdir)
887 util.makedirs(origbackupdir)
888
888
889 return fullorigpath + ".orig"
889 return fullorigpath + ".orig"
890
890
891 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
891 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
892 if opts is None:
892 if opts is None:
893 opts = {}
893 opts = {}
894 m = matcher
894 m = matcher
895 if dry_run is None:
895 if dry_run is None:
896 dry_run = opts.get('dry_run')
896 dry_run = opts.get('dry_run')
897 if similarity is None:
897 if similarity is None:
898 similarity = float(opts.get('similarity') or 0)
898 similarity = float(opts.get('similarity') or 0)
899
899
900 ret = 0
900 ret = 0
901 join = lambda f: os.path.join(prefix, f)
901 join = lambda f: os.path.join(prefix, f)
902
902
903 def matchessubrepo(matcher, subpath):
903 def matchessubrepo(matcher, subpath):
904 if matcher.exact(subpath):
904 if matcher.exact(subpath):
905 return True
905 return True
906 for f in matcher.files():
906 for f in matcher.files():
907 if f.startswith(subpath):
907 if f.startswith(subpath):
908 return True
908 return True
909 return False
909 return False
910
910
911 wctx = repo[None]
911 wctx = repo[None]
912 for subpath in sorted(wctx.substate):
912 for subpath in sorted(wctx.substate):
913 if opts.get('subrepos') or matchessubrepo(m, subpath):
913 if opts.get('subrepos') or matchessubrepo(m, subpath):
914 sub = wctx.sub(subpath)
914 sub = wctx.sub(subpath)
915 try:
915 try:
916 submatch = matchmod.subdirmatcher(subpath, m)
916 submatch = matchmod.subdirmatcher(subpath, m)
917 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
917 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
918 ret = 1
918 ret = 1
919 except error.LookupError:
919 except error.LookupError:
920 repo.ui.status(_("skipping missing subrepository: %s\n")
920 repo.ui.status(_("skipping missing subrepository: %s\n")
921 % join(subpath))
921 % join(subpath))
922
922
923 rejected = []
923 rejected = []
924 def badfn(f, msg):
924 def badfn(f, msg):
925 if f in m.files():
925 if f in m.files():
926 m.bad(f, msg)
926 m.bad(f, msg)
927 rejected.append(f)
927 rejected.append(f)
928
928
929 badmatch = matchmod.badmatch(m, badfn)
929 badmatch = matchmod.badmatch(m, badfn)
930 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
930 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
931 badmatch)
931 badmatch)
932
932
933 unknownset = set(unknown + forgotten)
933 unknownset = set(unknown + forgotten)
934 toprint = unknownset.copy()
934 toprint = unknownset.copy()
935 toprint.update(deleted)
935 toprint.update(deleted)
936 for abs in sorted(toprint):
936 for abs in sorted(toprint):
937 if repo.ui.verbose or not m.exact(abs):
937 if repo.ui.verbose or not m.exact(abs):
938 if abs in unknownset:
938 if abs in unknownset:
939 status = _('adding %s\n') % m.uipath(abs)
939 status = _('adding %s\n') % m.uipath(abs)
940 else:
940 else:
941 status = _('removing %s\n') % m.uipath(abs)
941 status = _('removing %s\n') % m.uipath(abs)
942 repo.ui.status(status)
942 repo.ui.status(status)
943
943
944 renames = _findrenames(repo, m, added + unknown, removed + deleted,
944 renames = _findrenames(repo, m, added + unknown, removed + deleted,
945 similarity)
945 similarity)
946
946
947 if not dry_run:
947 if not dry_run:
948 _markchanges(repo, unknown + forgotten, deleted, renames)
948 _markchanges(repo, unknown + forgotten, deleted, renames)
949
949
950 for f in rejected:
950 for f in rejected:
951 if f in m.files():
951 if f in m.files():
952 return 1
952 return 1
953 return ret
953 return ret
954
954
955 def marktouched(repo, files, similarity=0.0):
955 def marktouched(repo, files, similarity=0.0):
956 '''Assert that files have somehow been operated upon. files are relative to
956 '''Assert that files have somehow been operated upon. files are relative to
957 the repo root.'''
957 the repo root.'''
958 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
958 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
959 rejected = []
959 rejected = []
960
960
961 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
961 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
962
962
963 if repo.ui.verbose:
963 if repo.ui.verbose:
964 unknownset = set(unknown + forgotten)
964 unknownset = set(unknown + forgotten)
965 toprint = unknownset.copy()
965 toprint = unknownset.copy()
966 toprint.update(deleted)
966 toprint.update(deleted)
967 for abs in sorted(toprint):
967 for abs in sorted(toprint):
968 if abs in unknownset:
968 if abs in unknownset:
969 status = _('adding %s\n') % abs
969 status = _('adding %s\n') % abs
970 else:
970 else:
971 status = _('removing %s\n') % abs
971 status = _('removing %s\n') % abs
972 repo.ui.status(status)
972 repo.ui.status(status)
973
973
974 renames = _findrenames(repo, m, added + unknown, removed + deleted,
974 renames = _findrenames(repo, m, added + unknown, removed + deleted,
975 similarity)
975 similarity)
976
976
977 _markchanges(repo, unknown + forgotten, deleted, renames)
977 _markchanges(repo, unknown + forgotten, deleted, renames)
978
978
979 for f in rejected:
979 for f in rejected:
980 if f in m.files():
980 if f in m.files():
981 return 1
981 return 1
982 return 0
982 return 0
983
983
984 def _interestingfiles(repo, matcher):
984 def _interestingfiles(repo, matcher):
985 '''Walk dirstate with matcher, looking for files that addremove would care
985 '''Walk dirstate with matcher, looking for files that addremove would care
986 about.
986 about.
987
987
988 This is different from dirstate.status because it doesn't care about
988 This is different from dirstate.status because it doesn't care about
989 whether files are modified or clean.'''
989 whether files are modified or clean.'''
990 added, unknown, deleted, removed, forgotten = [], [], [], [], []
990 added, unknown, deleted, removed, forgotten = [], [], [], [], []
991 audit_path = pathutil.pathauditor(repo.root)
991 audit_path = pathutil.pathauditor(repo.root)
992
992
993 ctx = repo[None]
993 ctx = repo[None]
994 dirstate = repo.dirstate
994 dirstate = repo.dirstate
995 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
995 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
996 full=False)
996 full=False)
997 for abs, st in walkresults.iteritems():
997 for abs, st in walkresults.iteritems():
998 dstate = dirstate[abs]
998 dstate = dirstate[abs]
999 if dstate == '?' and audit_path.check(abs):
999 if dstate == '?' and audit_path.check(abs):
1000 unknown.append(abs)
1000 unknown.append(abs)
1001 elif dstate != 'r' and not st:
1001 elif dstate != 'r' and not st:
1002 deleted.append(abs)
1002 deleted.append(abs)
1003 elif dstate == 'r' and st:
1003 elif dstate == 'r' and st:
1004 forgotten.append(abs)
1004 forgotten.append(abs)
1005 # for finding renames
1005 # for finding renames
1006 elif dstate == 'r' and not st:
1006 elif dstate == 'r' and not st:
1007 removed.append(abs)
1007 removed.append(abs)
1008 elif dstate == 'a':
1008 elif dstate == 'a':
1009 added.append(abs)
1009 added.append(abs)
1010
1010
1011 return added, unknown, deleted, removed, forgotten
1011 return added, unknown, deleted, removed, forgotten
1012
1012
1013 def _findrenames(repo, matcher, added, removed, similarity):
1013 def _findrenames(repo, matcher, added, removed, similarity):
1014 '''Find renames from removed files to added ones.'''
1014 '''Find renames from removed files to added ones.'''
1015 renames = {}
1015 renames = {}
1016 if similarity > 0:
1016 if similarity > 0:
1017 for old, new, score in similar.findrenames(repo, added, removed,
1017 for old, new, score in similar.findrenames(repo, added, removed,
1018 similarity):
1018 similarity):
1019 if (repo.ui.verbose or not matcher.exact(old)
1019 if (repo.ui.verbose or not matcher.exact(old)
1020 or not matcher.exact(new)):
1020 or not matcher.exact(new)):
1021 repo.ui.status(_('recording removal of %s as rename to %s '
1021 repo.ui.status(_('recording removal of %s as rename to %s '
1022 '(%d%% similar)\n') %
1022 '(%d%% similar)\n') %
1023 (matcher.rel(old), matcher.rel(new),
1023 (matcher.rel(old), matcher.rel(new),
1024 score * 100))
1024 score * 100))
1025 renames[new] = old
1025 renames[new] = old
1026 return renames
1026 return renames
1027
1027
1028 def _markchanges(repo, unknown, deleted, renames):
1028 def _markchanges(repo, unknown, deleted, renames):
1029 '''Marks the files in unknown as added, the files in deleted as removed,
1029 '''Marks the files in unknown as added, the files in deleted as removed,
1030 and the files in renames as copied.'''
1030 and the files in renames as copied.'''
1031 wctx = repo[None]
1031 wctx = repo[None]
1032 with repo.wlock():
1032 with repo.wlock():
1033 wctx.forget(deleted)
1033 wctx.forget(deleted)
1034 wctx.add(unknown)
1034 wctx.add(unknown)
1035 for new, old in renames.iteritems():
1035 for new, old in renames.iteritems():
1036 wctx.copy(old, new)
1036 wctx.copy(old, new)
1037
1037
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:
        # copying back a copy? just refresh dst's dirstate entry if needed
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return
    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # source itself is only added, so there is no copy data to record
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1056
1056
def readrequires(opener, supported):
    '''Read and parse .hg/requires, checking every entry against the set of
    supported features.

    Returns the set of requirement strings on success. Raises
    error.RequirementError when the file is corrupt or when it lists
    features unknown to this Mercurial.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # a blank or non-alphanumeric-leading entry means the file is bogus
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1075
1075
def writerequires(opener, requirements):
    """Write *requirements* to .hg/requires, one entry per line, sorted."""
    with opener('requires', 'w') as fp:
        fp.write(''.join("%s\n" % r for r in sorted(requirements)))
1080
1080
class filecachesubentry(object):
    """Tracks stat data for a single path so callers can detect changes."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # Tri-state: True/False once determined, None while still unknown.
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Record fresh stat data, provided stat data is usable at all."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Return whether stat data can be trusted for this path."""
        if self._cacheable is None:
            # not determined yet: optimistically assume it is cacheable
            return True
        return self._cacheable

    def changed(self):
        """Return True when the file changed since the recorded stat (or
        when stat data cannot be trusted, in which case we must assume a
        change)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # cacheability may still have been unknown; settle it now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again with the settled answer
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for *path*, or None when it is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1135
1135
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits at the first changed entry
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1152
1152
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (under .hg/) whose stat data guards the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        """Decorator entry point: capture the wrapped function and its name.

        Returning self makes the decorated attribute a descriptor instance.
        """
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        """Return the cached value, recomputing it when a guarded file
        changed.

        Invariant: a value in obj.__dict__ always has a matching entry in
        obj._filecache.
        """
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        """Explicitly set the cached value, bypassing recomputation."""
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        """Drop the cached value; the next access recomputes it.

        The _filecache entry is deliberately kept so its stat data survives.
        """
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1228
1228
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run *cmd* through ui.system while exporting *envvar* so the child
    process may inherit *lock*.

    *lock* must currently be held; otherwise
    error.LockInheritanceContractViolation is raised. Returns the exit code
    from ui.system."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as lockname:
        # advertise the lock token to the subprocess via the environment
        environ[envvar] = lockname
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1238
1238
def wlocksub(repo, cmd, *args, **kwargs):
    """Run *cmd* as a subprocess allowed to inherit the repo's wlock.

    May only be called while the wlock is held. Accepts all the arguments
    that ui.system does and returns the subprocess exit code."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1247
1247
def gdinitconfig(ui):
    """Return True when new repositories should be created with general
    delta storage."""
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    # fall back to the usegeneraldelta knob, which defaults to on
    return ui.configbool('format', 'usegeneraldelta', True)
1254
1254
def gddeltaconfig(ui):
    """Return True when incoming deltas should be optimised, per the
    experimental format.generaldelta config knob (off by default)."""
    # experimental config: format.generaldelta
    optimise = ui.configbool('format', 'generaldelta', False)
    return optimise
1260
1260
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        # Bypass our own __setattr__, which forwards to the wrapped file.
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, name):
        # everything not defined here is proxied to the underlying file
        return getattr(self._origfh, name)

    def __setattr__(self, name, value):
        return setattr(self._origfh, name, value)

    def __delattr__(self, name):
        return delattr(self._origfh, name)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # hand the real file to the closer instead of closing it ourselves
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1288
1288
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads.

    Intended to be used as a context manager: close() may only be called
    between __enter__ and __exit__. When disabled (by config or a small
    expected file count) _running stays False and close() falls back to
    closing synchronously.
    """
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        # exception captured on a worker thread, re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        # bounded queue: close() blocks once maxqueue handles are pending
        self._queue = Queue.Queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        # mark the manager active; close() refuses to run otherwise
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # signal workers to drain the queue and stop
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so we notice _running going False promptly
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except Queue.Empty:
                # queue momentarily empty; only exit once told to stop
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort('can only call close() when context manager '
                              'active')

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1379
1379
General Comments 0
You need to be logged in to leave comments. Login now