##// END OF EJS Templates
py3: use encoding.environ instead of os.environ...
Pulkit Goyal -
r30109:96a2278e default
parent child Browse files
Show More
@@ -1,1470 +1,1470 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
# Select the platform-specific implementation of the scm helpers.  The
# Windows and POSIX variants export the same interface, so the rest of
# this module can use ``scmplatform`` without caring which one loaded.
if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

# Re-export the rc-file path lookups at module level for convenience.
systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    # no per-instance __dict__: all state lives in the tuple itself
    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        # the tuple itself supplies the seven %r fields, in order
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # dict.items() instead of dict.iteritems(): the latter does not
    # exist on Python 3, and sorted() materializes the view either way,
    # so this is behaviorally identical on Python 2.
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    # ``excluded or ()`` collapses the original "if excluded:" guard
    for n in excluded or ():
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleaned up.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141
141
def checknewlabel(repo, lbl, kind):
    # "kind" is deliberately never interpolated into the ui output:
    # doing so would make the strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        # not an integer: acceptable as a name
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # portability checking disabled by configuration
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always enforces portable names.
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    # De Morgan of the original check: the setting is valid when it is
    # a recognized keyword or parses as a boolean.
    valid = warn or abort or lval == 'ignore' or bval is not None
    if not valid:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
185
185
class casecollisionauditor(object):
    '''Detect additions whose lowercased name collides with a tracked file.

    Depending on ``abort``, a detected collision either raises or is
    reported as a warning on ``ui``.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        # one pass over the joined names is cheaper than lowering each
        # dirstate entry individually
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        # Names already audited by this object; auditing the same
        # filename twice must not produce a spurious collision.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.  Returns None when nothing is filtered at or
    below ``maxrev``.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # feed bytes, not a unicode str: hashlib's update() rejects
            # str on Python 3.  Revisions are ints, so b'%d' produces
            # the same bytes the old "'%s;' % rev" produced on Python 2.
            s.update(b'%d;' % rev)
        key = s.digest()
    return key
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only ENOENT is swallowed; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # only ENOENT is swallowed; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # ``open`` is just an alias for calling the vfs object itself
        return self.__call__

    def read(self, path):
        # read the whole file in binary mode
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        # read all lines; mode defaults to binary
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        # overwrite the file in binary mode; the file handle may be
        # closed on a background thread when ``backgroundclose`` is set
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        # append in binary mode
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        # path=None tests the vfs root itself (join handles None)
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            # missing (or unstatable) paths are simply "no"
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        # create the temp file under the vfs root, but report its name
        # relative to the vfs (callers pass vfs-relative ``dir``)
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                # advance mtime by one second (wrapping at 31 bits) so
                # cache validation can tell old and new contents apart
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failed unlinks; other failures re-raise
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    # already writable: the failure was something else
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always detach the closer, even if the body raised
                vfs._backgroundfilecloser = None
478
478
479 class vfs(abstractvfs):
479 class vfs(abstractvfs):
480 '''Operate files relative to a base directory
480 '''Operate files relative to a base directory
481
481
482 This class is used to hide the details of COW semantics and
482 This class is used to hide the details of COW semantics and
483 remote file access from higher level code.
483 remote file access from higher level code.
484 '''
484 '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # ``base`` is the directory all vfs-relative paths resolve
        # against.  ``expandpath`` applies user/environment expansion to
        # it first; ``realpath`` then resolves symlinks.  ``audit``
        # enables path auditing via the ``mustaudit`` property.
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        # file mode applied to newly created files by _fixfilemode;
        # None leaves modes untouched
        self.createmode = None
        self._trustnlink = None
494
494
    @property
    def mustaudit(self):
        # whether paths are validated before filesystem access
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            # install a real path auditor rooted at base
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = util.always
506
506
    @util.propertycache
    def _cansymlink(self):
        # cached per instance: whether the filesystem at ``base``
        # supports symbolic links (util.checklink)
        return util.checklink(self.base)
510
510
    @util.propertycache
    def _chmod(self):
        # cached per instance: whether the filesystem at ``base``
        # honors executable permission bits (util.checkexec)
        return util.checkexec(self.base)
514
514
    def _fixfilemode(self, name):
        # Apply self.createmode to a newly created file, but only when a
        # mode is configured and the filesystem honors permissions.
        if self.createmode is None or not self._chmod:
            return
        # mask with 0o666: never grant exec/special bits here
        os.chmod(name, self.createmode & 0o666)
519
519
    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            # reject OS-invalid names before touching the filesystem
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # link count of the target: -1 unknown, 0 freshly created, >0 existing
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks (copy-on-write) before modifying
                        # the file in place
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # newly created file: apply the configured create mode
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp
605
605
606 def symlink(self, src, dst):
606 def symlink(self, src, dst):
607 self.audit(dst)
607 self.audit(dst)
608 linkname = self.join(dst)
608 linkname = self.join(dst)
609 try:
609 try:
610 os.unlink(linkname)
610 os.unlink(linkname)
611 except OSError:
611 except OSError:
612 pass
612 pass
613
613
614 util.makedirs(os.path.dirname(linkname), self.createmode)
614 util.makedirs(os.path.dirname(linkname), self.createmode)
615
615
616 if self._cansymlink:
616 if self._cansymlink:
617 try:
617 try:
618 os.symlink(src, linkname)
618 os.symlink(src, linkname)
619 except OSError as err:
619 except OSError as err:
620 raise OSError(err.errno, _('could not symlink to %r: %s') %
620 raise OSError(err.errno, _('could not symlink to %r: %s') %
621 (src, err.strerror), linkname)
621 (src, err.strerror), linkname)
622 else:
622 else:
623 self.write(dst, src)
623 self.write(dst, src)
624
624
625 def join(self, path, *insidef):
625 def join(self, path, *insidef):
626 if path:
626 if path:
627 return os.path.join(self.base, path, *insidef)
627 return os.path.join(self.base, path, *insidef)
628 else:
628 else:
629 return self.base
629 return self.base
630
630
631 opener = vfs
631 opener = vfs
632
632
class auditvfs(object):
    """Base for wrapper vfs classes: forwards audit and option toggles to
    the wrapped ``vfs`` instance."""

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        # delegate auditing state to the wrapped vfs
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        # delegate option storage to the wrapped vfs
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
652
652
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before it reaches the wrapped vfs
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))

filteropener = filtervfs  # historical alias
670
670
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
684
684
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only the root itself being unreadable is fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Remember dirname's stat in dirlst; return True when it was
            # not seen before (guards against symlink cycles).
            dirstat = os.stat(dirname)
            for prevstat in dirlst:
                if samestat(dirstat, prevstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect cycles, so don't follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            keep = []
            for name in dirs:
                fullpath = os.path.join(root, name)
                if adddir(seen_dirs, fullpath):
                    if os.path.islink(fullpath):
                        # walk through the symlinked tree in-line
                        for sub in walkrepos(fullpath, True, seen_dirs):
                            yield sub
                    else:
                        keep.append(name)
            dirs[:] = keep
732
732
def osrcpath():
    '''return default os-specific hgrc search path'''
    # bundled default.d/*.rc files come first, then system, then user rcs
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        path = [os.path.join(defaultpath, f)
                for f, kind in osutil.listdir(defaultpath)
                if f.endswith('.rc')]
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
745
745
# module-level cache for rcpath(); computed lazily on first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in encoding.environ:
            _rcpath = []
            # Read the value from encoding.environ as well: the membership
            # test above was converted to encoding.environ (py3: bytes keys)
            # but the read still used os.environ, mixing str/bytes sources.
            # NOTE(review): os.pathsep is str; confirm separator type once
            # py3 bytes environ is fully plumbed through.
            for p in encoding.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
771
771
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None stands for the working directory; map it to the wdirrev sentinel
    return wdirrev if rev is None else rev
778
778
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx (the last matching rev).

    Falls back to ``default`` for an empty spec; aborts when the spec
    matches nothing. Note that 0 is a valid revision spec.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
787
787
def _pairspec(revspec):
    # Return a truthy value when the top-level revset operator of ``revspec``
    # is one of the range forms (x:y, x:, :y, :), i.e. the user explicitly
    # asked for a pair of revisions. Returns the (falsy) parse result itself
    # when the spec is empty, so callers must test truthiness only.
    tree = revset.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
791
791
def revpair(repo, revs):
    """Resolve revset specs ``revs`` to a pair of nodes (first, second).

    With no specs, returns (first working-directory parent, None). When the
    whole set collapses to a single revision that was not written as an
    explicit range, second is None. Aborts on an empty range or when one
    side of a multi-spec range is empty.
    """
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints cheaply when the smartset knows its ordering
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # several specs where some individually match nothing: the collapse to a
    # single rev hides an empty side, which is an error
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
821
821
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are wrapped into rev(N); strings pass through untouched
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    matcher = revset.matchany(repo.ui, allspecs, repo)
    return matcher(repo)
850
850
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps  # merge: both parents matter
    if repo.ui.debugflag:
        # debug output always shows both slots, padding with null
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx.rev()) - 1:
        return []  # parent is just the preceding revision: omit it
    return ps
866
866
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)

    expanded = []
    for patspec in pats:
        kind, pat = matchmod._patsplit(patspec, None)
        if kind is not None:
            # explicit pattern kind (glob:, re:, ...): leave untouched
            expanded.append(patspec)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            # invalid glob syntax: treat literally
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # no filesystem match: keep the original spec
            expanded.append(patspec)
    return expanded
885
885
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    if badfn is None:
        def badfn(f, msg):
            # default: warn about files that didn't match
            # (closes over ``m``, assigned below, at call time)
            ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
910
910
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats(), dropping the normalized pattern list
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default,
                                  badfn=badfn)
    return matcher
915
915
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
919
919
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
923
923
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backuproot = ui.config('ui', 'origbackuppath', None)
    if backuproot is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the backup root
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backuproot, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
943
943
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Schedule unknown files for addition and missing files for removal.

    Walks the dirstate with ``matcher`` (recursing into matched subrepos),
    prints the planned adds/removes, detects renames when ``similarity`` > 0
    and, unless ``dry_run``, records the changes in the dirstate. Returns 1
    when a subrepo failed or an explicitly named file was rejected, else 0.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # recurse into subrepositories that the matcher touches
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # collect explicitly named files that failed to match
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added (unknown/forgotten) and removed (deleted)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly requested file that was rejected is a failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
999
999
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # ``rejected`` is filled by the badfn closure during the dirstate walk
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # verbose mode: report planned adds (unknown/forgotten) and removals
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    # unlike addremove(), changes are always recorded (no dry-run mode)
    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a named file that was rejected by the walk is a failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1028
1028
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) filename lists.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # classify by dirstate state char ('?' unknown, 'r' removed, 'a' added)
    # combined with whether the file exists on disk (``st`` truthy)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed yet present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1057
1057
1058 def _findrenames(repo, matcher, added, removed, similarity):
1058 def _findrenames(repo, matcher, added, removed, similarity):
1059 '''Find renames from removed files to added ones.'''
1059 '''Find renames from removed files to added ones.'''
1060 renames = {}
1060 renames = {}
1061 if similarity > 0:
1061 if similarity > 0:
1062 for old, new, score in similar.findrenames(repo, added, removed,
1062 for old, new, score in similar.findrenames(repo, added, removed,
1063 similarity):
1063 similarity):
1064 if (repo.ui.verbose or not matcher.exact(old)
1064 if (repo.ui.verbose or not matcher.exact(old)
1065 or not matcher.exact(new)):
1065 or not matcher.exact(new)):
1066 repo.ui.status(_('recording removal of %s as rename to %s '
1066 repo.ui.status(_('recording removal of %s as rename to %s '
1067 '(%d%% similar)\n') %
1067 '(%d%% similar)\n') %
1068 (matcher.rel(old), matcher.rel(new),
1068 (matcher.rel(old), matcher.rel(new),
1069 score * 100))
1069 score * 100))
1070 renames[new] = old
1070 renames[new] = old
1071 return renames
1071 return renames
1072
1072
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # hold the wlock so all dirstate updates land atomically with respect
    # to other writers
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
1082
1082
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow a pre-existing copy chain: if src was itself copied, record the
    # original source instead
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only just added; no copy data can be stored
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1101
1101
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings, or raises RequirementError if
    the file is corrupt or contains entries not in *supported*.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            # a requirement must start with an alphanumeric character;
            # anything else means the file itself is damaged
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1120
1120
def writerequires(opener, requirements):
    '''Write the requirement strings, one per line in sorted order, to the
    .hg/requires file via *opener*.'''
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)
1125
1125
class filecachesubentry(object):
    """Tracks the stat state of a single file for cache invalidation.

    Records a util.cachestat snapshot of *path* and answers whether the
    file has changed since the snapshot was taken.
    """
    def __init__(self, path, stat):
        # path: file to watch; stat: whether to take a stat snapshot now
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-snapshot the file's stat info (only if it is cacheable)."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Return whether stat info can reliably detect changes to path."""
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed since the last snapshot.

        When the file is not cacheable this conservatively reports True.
        On a real change the snapshot is updated as a side effect."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if it does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1180
1180
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a group of paths."""
    def __init__(self, paths, stat=True):
        # one sub-entry per watched path; stat controls whether a stat
        # snapshot is taken immediately
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        """Re-snapshot stat info for every watched path."""
        for entry in self._entries:
            entry.refresh()
1197
1197
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (joined via self.join) of the files to watch
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator entry point: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1276
1276
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run *cmd* as a subprocess while lending it *lock* via *envvar*.

    Returns the subprocess exit code; extra args go to repo.ui.system."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        # the locker token tells the child process it already owns the lock
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1286
1286
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
1295
1295
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    # either explicit generaldelta, or the (default-on) usegeneraldelta knob
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))
1302
1302
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)
1308
1308
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Delegates all attribute access to the wrapped file handle; subclasses
    override __exit__/close to customize close behavior.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # bypass our own __setattr__ (which forwards to the wrapped handle)
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1334
1334
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Instead of closing directly, close requests are handed to *closer*
    (e.g. a backgroundfilecloser), which may close asynchronously.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # bypass closewrapbase.__setattr__, which forwards to the handle
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1349
1349
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads.

    Use as a context manager; schedule handles with close().  When disabled
    by configuration or a small expected file count, close() falls back to
    closing synchronously.
    """
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1440
1440
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot stat info before any writes, for later comparison;
        # bypass closewrapbase.__setattr__, which forwards to the handle
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        # If the post-close stat is indistinguishable from the pre-open one,
        # nudge the mtime forward so stat-based cache validation notices
        # the change.
        oldstat = self._oldstat
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(self._origfh.name, (advanced, advanced))

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now