##// END OF EJS Templates
vfs: use propertycache for open...
Pierre-Yves David -
r29718:2dd8c225 default
parent child Browse files
Show More
@@ -1,1431 +1,1429 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
# Select the platform-specific implementation at import time; Windows and
# POSIX keep their quirks in separate sibling modules.
if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

# Re-exported aliases so callers can look up rc paths without caring
# which platform module was chosen above.
systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Pack the seven per-status file lists into the underlying tuple;
        # the properties below give each slot a readable name.
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files tracked in the dirstate but gone from the working copy
        (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields ``(subpath, subrepo)`` pairs sorted by path, preferring the
    subrepo state from ``ctx1``; paths only present in ``ctx2`` are
    yielded last as null subrepos based on ``ctx1``.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # items() instead of the Python 2-only iteritems(): sorted()
    # materializes the pairs either way, and this keeps the function
    # working on both major Python versions.
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in (excluded or ()):
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
        return
    ui.status(_("no changes found (ignored %d secret changesets)\n")
              % len(secretlist))
141
141
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is not acceptable as a new label name.

    The "kind" parameter is deliberately not used in ui output because
    it makes strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise error.Abort(
            _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config('ui', 'portablefilenames', 'warn')
    lowered = raw.lower()
    asbool = util.parsebool(raw)
    # Windows always aborts on non-portable names; elsewhere only an
    # explicit 'abort' setting does.
    abort = os.name == 'nt' or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % raw)
    return abort, warn
185
185
class casecollisionauditor(object):
    '''Warn or abort when a new filename would case-fold-collide with a
    file already tracked in the dirstate. Instances are callable; feed
    them filenames one at a time.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        # Case-fold the whole dirstate in a single encoding.lower() call
        # for speed, then split the NUL-joined result back apart.
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        # Names already audited; re-auditing the same filename must not
        # produce a bogus self-collision complaint.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (up to and
    including ``maxrev``) and returns that SHA-1 digest. Returns None
    when nothing is filtered, so there is nothing to validate.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # hashlib requires bytes on Python 3; the explicit ASCII
            # encode produces byte-identical input on Python 2 since
            # revision numbers are always ASCII digits.
            s.update(('%s;' % rev).encode('ascii'))
        key = s.digest()
    return key
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Subclasses are expected to be callable (``self(path, mode, ...)``
    returns an open file object) and to provide ``join(path)``; every
    helper below is built on those two primitives.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # Only a missing file is tolerated; other I/O errors propagate.
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # Only a missing file is tolerated; other I/O errors propagate.
            if inst.errno != errno.ENOENT:
                raise
        return []

    # ``open`` used to be a real method forwarding to ``__call__``; it is
    # now a cached property resolving to the bound ``__call__``.
    # NOTE(review): this relies on util.propertycache storing the result
    # on the instance so the property body runs only once -- confirm
    # against util.propertycache.
    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        return self.__call__

    def read(self, path):
        # Whole-file binary read via the subclass's __call__.
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    # The following helpers are thin wrappers delegating to os/util on
    # the joined (vfs-rooted) path.

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a uniquely named temporary file under ``dir`` (which is
        vfs-relative) and return ``(fd, vfs-relative name)``.'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # Only retry failed os.remove calls; anything else
                # (e.g. rmdir failures) re-raises.
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # Always clear the slot so a later backgroundclosing()
                # on the same vfs is permitted again.
                vfs._backgroundfilecloser = None
480
478
481 class vfs(abstractvfs):
479 class vfs(abstractvfs):
482 '''Operate files relative to a base directory
480 '''Operate files relative to a base directory
483
481
484 This class is used to hide the details of COW semantics and
482 This class is used to hide the details of COW semantics and
485 remote file access from higher level code.
483 remote file access from higher level code.
486 '''
484 '''
487 def __init__(self, base, audit=True, expandpath=False, realpath=False):
485 def __init__(self, base, audit=True, expandpath=False, realpath=False):
488 if expandpath:
486 if expandpath:
489 base = util.expandpath(base)
487 base = util.expandpath(base)
490 if realpath:
488 if realpath:
491 base = os.path.realpath(base)
489 base = os.path.realpath(base)
492 self.base = base
490 self.base = base
493 self.mustaudit = audit
491 self.mustaudit = audit
494 self.createmode = None
492 self.createmode = None
495 self._trustnlink = None
493 self._trustnlink = None
496
494
497 @property
495 @property
498 def mustaudit(self):
496 def mustaudit(self):
499 return self._audit
497 return self._audit
500
498
501 @mustaudit.setter
499 @mustaudit.setter
502 def mustaudit(self, onoff):
500 def mustaudit(self, onoff):
503 self._audit = onoff
501 self._audit = onoff
504 if onoff:
502 if onoff:
505 self.audit = pathutil.pathauditor(self.base)
503 self.audit = pathutil.pathauditor(self.base)
506 else:
504 else:
507 self.audit = util.always
505 self.audit = util.always
508
506
509 @util.propertycache
507 @util.propertycache
510 def _cansymlink(self):
508 def _cansymlink(self):
511 return util.checklink(self.base)
509 return util.checklink(self.base)
512
510
513 @util.propertycache
511 @util.propertycache
514 def _chmod(self):
512 def _chmod(self):
515 return util.checkexec(self.base)
513 return util.checkexec(self.base)
516
514
517 def _fixfilemode(self, name):
515 def _fixfilemode(self, name):
518 if self.createmode is None or not self._chmod:
516 if self.createmode is None or not self._chmod:
519 return
517 return
520 os.chmod(name, self.createmode & 0o666)
518 os.chmod(name, self.createmode & 0o666)
521
519
522 def __call__(self, path, mode="r", text=False, atomictemp=False,
520 def __call__(self, path, mode="r", text=False, atomictemp=False,
523 notindexed=False, backgroundclose=False, checkambig=False):
521 notindexed=False, backgroundclose=False, checkambig=False):
524 '''Open ``path`` file, which is relative to vfs root.
522 '''Open ``path`` file, which is relative to vfs root.
525
523
526 Newly created directories are marked as "not to be indexed by
524 Newly created directories are marked as "not to be indexed by
527 the content indexing service", if ``notindexed`` is specified
525 the content indexing service", if ``notindexed`` is specified
528 for "write" mode access.
526 for "write" mode access.
529
527
530 If ``backgroundclose`` is passed, the file may be closed asynchronously.
528 If ``backgroundclose`` is passed, the file may be closed asynchronously.
531 It can only be used if the ``self.backgroundclosing()`` context manager
529 It can only be used if the ``self.backgroundclosing()`` context manager
532 is active. This should only be specified if the following criteria hold:
530 is active. This should only be specified if the following criteria hold:
533
531
534 1. There is a potential for writing thousands of files. Unless you
532 1. There is a potential for writing thousands of files. Unless you
535 are writing thousands of files, the performance benefits of
533 are writing thousands of files, the performance benefits of
536 asynchronously closing files is not realized.
534 asynchronously closing files is not realized.
537 2. Files are opened exactly once for the ``backgroundclosing``
535 2. Files are opened exactly once for the ``backgroundclosing``
538 active duration and are therefore free of race conditions between
536 active duration and are therefore free of race conditions between
539 closing a file on a background thread and reopening it. (If the
537 closing a file on a background thread and reopening it. (If the
540 file were opened multiple times, there could be unflushed data
538 file were opened multiple times, there could be unflushed data
541 because the original file handle hasn't been flushed/closed yet.)
539 because the original file handle hasn't been flushed/closed yet.)
542
540
543 ``checkambig`` argument is passed to atomictemplfile (valid
541 ``checkambig`` argument is passed to atomictemplfile (valid
544 only for writing), and is useful only if target file is
542 only for writing), and is useful only if target file is
545 guarded by any lock (e.g. repo.lock or repo.wlock).
543 guarded by any lock (e.g. repo.lock or repo.wlock).
546 '''
544 '''
547 if self._audit:
545 if self._audit:
548 r = util.checkosfilename(path)
546 r = util.checkosfilename(path)
549 if r:
547 if r:
550 raise error.Abort("%s: %r" % (r, path))
548 raise error.Abort("%s: %r" % (r, path))
551 self.audit(path)
549 self.audit(path)
552 f = self.join(path)
550 f = self.join(path)
553
551
554 if not text and "b" not in mode:
552 if not text and "b" not in mode:
555 mode += "b" # for that other OS
553 mode += "b" # for that other OS
556
554
557 nlink = -1
555 nlink = -1
558 if mode not in ('r', 'rb'):
556 if mode not in ('r', 'rb'):
559 dirname, basename = util.split(f)
557 dirname, basename = util.split(f)
560 # If basename is empty, then the path is malformed because it points
558 # If basename is empty, then the path is malformed because it points
561 # to a directory. Let the posixfile() call below raise IOError.
559 # to a directory. Let the posixfile() call below raise IOError.
562 if basename:
560 if basename:
563 if atomictemp:
561 if atomictemp:
564 util.makedirs(dirname, self.createmode, notindexed)
562 util.makedirs(dirname, self.createmode, notindexed)
565 return util.atomictempfile(f, mode, self.createmode,
563 return util.atomictempfile(f, mode, self.createmode,
566 checkambig=checkambig)
564 checkambig=checkambig)
567 try:
565 try:
568 if 'w' in mode:
566 if 'w' in mode:
569 util.unlink(f)
567 util.unlink(f)
570 nlink = 0
568 nlink = 0
571 else:
569 else:
572 # nlinks() may behave differently for files on Windows
570 # nlinks() may behave differently for files on Windows
573 # shares if the file is open.
571 # shares if the file is open.
574 with util.posixfile(f):
572 with util.posixfile(f):
575 nlink = util.nlinks(f)
573 nlink = util.nlinks(f)
576 if nlink < 1:
574 if nlink < 1:
577 nlink = 2 # force mktempcopy (issue1922)
575 nlink = 2 # force mktempcopy (issue1922)
578 except (OSError, IOError) as e:
576 except (OSError, IOError) as e:
579 if e.errno != errno.ENOENT:
577 if e.errno != errno.ENOENT:
580 raise
578 raise
581 nlink = 0
579 nlink = 0
582 util.makedirs(dirname, self.createmode, notindexed)
580 util.makedirs(dirname, self.createmode, notindexed)
583 if nlink > 0:
581 if nlink > 0:
584 if self._trustnlink is None:
582 if self._trustnlink is None:
585 self._trustnlink = nlink > 1 or util.checknlink(f)
583 self._trustnlink = nlink > 1 or util.checknlink(f)
586 if nlink > 1 or not self._trustnlink:
584 if nlink > 1 or not self._trustnlink:
587 util.rename(util.mktempcopy(f), f)
585 util.rename(util.mktempcopy(f), f)
588 fp = util.posixfile(f, mode)
586 fp = util.posixfile(f, mode)
589 if nlink == 0:
587 if nlink == 0:
590 self._fixfilemode(f)
588 self._fixfilemode(f)
591
589
592 if backgroundclose:
590 if backgroundclose:
593 if not self._backgroundfilecloser:
591 if not self._backgroundfilecloser:
594 raise error.Abort(_('backgroundclose can only be used when a '
592 raise error.Abort(_('backgroundclose can only be used when a '
595 'backgroundclosing context manager is active')
593 'backgroundclosing context manager is active')
596 )
594 )
597
595
598 fp = delayclosedfile(fp, self._backgroundfilecloser)
596 fp = delayclosedfile(fp, self._backgroundfilecloser)
599
597
600 return fp
598 return fp
601
599
602 def symlink(self, src, dst):
600 def symlink(self, src, dst):
603 self.audit(dst)
601 self.audit(dst)
604 linkname = self.join(dst)
602 linkname = self.join(dst)
605 try:
603 try:
606 os.unlink(linkname)
604 os.unlink(linkname)
607 except OSError:
605 except OSError:
608 pass
606 pass
609
607
610 util.makedirs(os.path.dirname(linkname), self.createmode)
608 util.makedirs(os.path.dirname(linkname), self.createmode)
611
609
612 if self._cansymlink:
610 if self._cansymlink:
613 try:
611 try:
614 os.symlink(src, linkname)
612 os.symlink(src, linkname)
615 except OSError as err:
613 except OSError as err:
616 raise OSError(err.errno, _('could not symlink to %r: %s') %
614 raise OSError(err.errno, _('could not symlink to %r: %s') %
617 (src, err.strerror), linkname)
615 (src, err.strerror), linkname)
618 else:
616 else:
619 self.write(dst, src)
617 self.write(dst, src)
620
618
621 def join(self, path, *insidef):
619 def join(self, path, *insidef):
622 if path:
620 if path:
623 return os.path.join(self.base, path, *insidef)
621 return os.path.join(self.base, path, *insidef)
624 else:
622 else:
625 return self.base
623 return self.base
626
624
# legacy alias kept for callers that predate the "vfs" naming
opener = vfs
628
626
class auditvfs(object):
    """Base wrapper that forwards auditing and option state to an
    underlying vfs object."""

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        # delegate: auditing is controlled by the wrapped vfs
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        # delegate: options live on the wrapped vfs
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
648
646
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the path through the filter before delegating
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
664
662
# legacy alias kept for callers that predate the "vfs" naming
filteropener = filtervfs
666
664
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
680
678
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate errors for the root itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat if unseen; True when it was new
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, seen) for seen in dirlst):
                return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the symlink target in a fresh recursion
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
728
726
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        # bundled default configuration fragments come first
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
741
739
# memoized result of rcpath(); populated lazily on first call
_rcpath = None
743
741
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for entry in os.environ['HGRCPATH'].split(os.pathsep):
                if not entry:
                    continue
                entry = util.expandpath(entry)
                if os.path.isdir(entry):
                    # a directory contributes every *.rc file inside it
                    for f, kind in osutil.listdir(entry):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(entry, f))
                else:
                    _rcpath.append(entry)
        else:
            _rcpath = osrcpath()
    return _rcpath
767
765
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working directory is conventionally represented by None
    return wdirrev if rev is None else rev
774
772
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx, or ``default`` when the
    spec is empty (0 is a valid revision, not "empty")."""
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec])
    if not resolved:
        raise error.Abort(_('empty revision set'))
    return repo[resolved.last()]
783
781
def _pairspec(revspec):
    # True when the top level of ``revspec`` parses to a range expression
    tree = revset.parse(revspec)
    tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
788
786
def revpair(repo, revs):
    """Resolve ``revs`` to a (first, second) node pair; ``second`` is None
    when the specs name a single revision and no range was requested."""
    if not revs:
        return repo.dirstate.p1(), None

    rset = revrange(repo, revs)

    # pick the endpoints cheaply when the set's ordering is known
    if not rset:
        first = second = None
    elif rset.isascending():
        first = rset.min()
        second = rset.max()
    elif rset.isdescending():
        first = rset.max()
        second = rset.min()
    else:
        first = rset.first()
        second = rset.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
818
816
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # normalize bare integers into explicit rev() expressions
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)
847
845
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # debug output always shows both parents, padding with null
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        # linear history: the parent is implied by position
        return []
    return parents
863
861
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # only bare (kind-less) patterns are glob-expanded
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret
882
880
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # note: closes over ``m`` assigned below
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
907
905
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # convenience wrapper: discard the normalized pattern list
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
912
910
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
916
914
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
920
918
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    # relocate the backup under the configured directory, mirroring the
    # file's position relative to the repo root
    filepathfromroot = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)

    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"
940
938
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and forget missing ones, optionally detecting renames.

    Returns 1 when any file was rejected or a subrepo reported failure,
    0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is involved when named exactly or via a file inside it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1004
1002
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # ``rejected`` is referenced lazily by the badfn closure, so binding it
    # after matchfiles() is safe
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1033
1031
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # classify by dirstate code ('?' unknown, 'r' removed, 'a' added)
        # combined with whether the file exists on disk (st truthy)
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1062
1060
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping each added name to the removed name it is
    considered a rename of, for pairs at least 'similarity' similar.
    '''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        # only stay quiet when both ends were named exactly and we are
        # not in verbose mode (same short-circuit order as before)
        if (repo.ui.verbose
            or not (matcher.exact(old) and matcher.exact(new))):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1077
1075
def _markchanges(repo, unknown, deleted, renames):
    '''Record the outcome of addremove in the dirstate.

    Files in 'unknown' are marked as added, files in 'deleted' are marked
    as removed, and every new->old pair in 'renames' is recorded as a copy.
    All updates happen under the repo's wlock.
    '''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for newfile, oldfile in renames.iteritems():
            wctx.copy(oldfile, newfile)
1087
1085
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst.

    For different reasons it might not end with dst being marked as copied
    from src (copying back over the copy source, or copying from a file
    that is merely scheduled for addition).
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc:
        # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # the source is only scheduled for addition, so there is no
        # committed data to record copy metadata against
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1106
1104
def readrequires(opener, supported):
    '''Read and parse .hg/requires, checking entries against 'supported'.

    Returns the set of requirement names on success.  Raises
    RequirementError when the file looks corrupt or when it lists a
    feature unknown to this Mercurial.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for req in requirements:
        if req in supported:
            continue
        if not req or not req[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(req)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1125
1123
def writerequires(opener, requirements):
    """Write the given requirement names to .hg/requires, one per line,
    in sorted order."""
    with opener('requires', 'w') as reqfile:
        for name in sorted(requirements):
            reqfile.write("%s\n" % name)
1130
1128
class filecachesubentry(object):
    """Stat-tracking record for a single file backing a filecache entry."""

    def __init__(self, path, stat):
        # path of the tracked file
        self.path = path
        # last recorded util.cachestat, or None when never statted / missing
        self.cachestat = None
        # tri-state: True/False once determined, None while unknown
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-record stat info, unless the file is known uncacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat-based caching can be trusted for this path.

        While still undetermined (None) we optimistically assume it can.
        """
        if self._cacheable is None:
            # we don't know yet, assume it is for now
            return True
        return self._cacheable

    def changed(self):
        """Return True when the file changed or caching cannot be trusted."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        return False

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if the file is absent."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1185
1183
class filecacheentry(object):
    """Aggregates one filecachesubentry per tracked path."""

    def __init__(self, paths, stat=True):
        # one stat-tracking subentry per path
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1202
1200
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative file names whose stat info controls invalidation;
        # self.join() turns them into runtime paths per instance
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name,
        # and return self so the class attribute becomes this descriptor
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # A warm cache stores the value in obj.__dict__; since this is a
        # data descriptor __get__ still runs, so check the instance dict
        # ourselves and return the cached value without re-statting.
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # stat info exists from an earlier call: recompute only when
            # any of the backing files changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop the cached value so the next __get__ re-checks the files
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1281
1279
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd via ui.system while advertising an inherited lock.

    The lock's inheritance token is exported to the child through the
    environment variable named by envvar (the passed-in environ dict, if
    any, is updated in place).  Raises LockInheritanceContractViolation
    when no lock is actually held.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as lockname:
        environ[envvar] = lockname
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1291
1289
def wlocksub(repo, cmd, *args, **kwargs):
    """Run cmd in a subprocess that may inherit repo's working-dir lock.

    Must be called while the wlock is held.  Accepts the same extra
    arguments as ui.system and returns the subprocess exit code.
    """
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1300
1298
def gdinitconfig(ui):
    """Return True if new repositories should be created with general delta.
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1307
1305
def gddeltaconfig(ui):
    """Return True if incoming deltas should be optimised for general delta.
    """
    # experimental config: format.generaldelta
    value = ui.configbool('format', 'generaldelta', False)
    return value
1313
1311
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        # bypass our own __setattr__ (which forwards every assignment to
        # the wrapped file) so these two attributes land on the proxy
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        # delegate all other attribute reads to the wrapped file object
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        # attribute writes go to the wrapped file, not the proxy
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # instead of closing here, hand the file to the closer, which may
        # defer the actual close
        self._closer.close(self._origfh)

    def close(self):
        # explicit close() is routed through the closer as well
        self._closer.close(self._origfh)
1341
1339
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # True only between queue creation and __exit__; close() falls
        # back to synchronous closing when this is False
        self._running = False
        # set by __enter__; close() refuses to work outside the context
        self._entered = False
        # worker threads draining self._queue
        self._threads = []
        # first exception raised by a worker, re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # signal workers to drain the queue and stop
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # the timeout lets the loop periodically re-check
                # self._running so workers can exit after __exit__
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
General Comments 0
You need to be logged in to leave comments. Login now