scmutil.addremove: use iteritems on walk results...
Siddharth Agarwal
r18865:835e9dfd default
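This changeset removes a redundant dictionary lookup in scmutil.addremove: instead of iterating over the keys of the dirstate walk results and fetching each value again, the loop now walks key/value pairs with iteritems(). A minimal Python 2 sketch of the two patterns (walkresults here is a made-up stand-in for the dict returned by dirstate.walk()); the full diff of mercurial/scmutil.py follows.

# Python 2, matching the source; 'walkresults' is a hypothetical stand-in for
# the dict returned by dirstate.walk(): filename -> stat-like value or None.
walkresults = {'a.txt': None, 'b.txt': 'stat-info'}

# Before: iterate keys, then look each value up again (an extra hash lookup per file).
for abs in walkresults:
    st = walkresults[abs]

# After: iterate key/value pairs in a single pass.
for abs, st in walkresults.iteritems():
    print abs, st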
@@ -1,893 +1,892 @@
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, re, stat, glob
12 import os, errno, re, stat, glob
13
13
14 if os.name == 'nt':
14 if os.name == 'nt':
15 import scmwindows as scmplatform
15 import scmwindows as scmplatform
16 else:
16 else:
17 import scmposix as scmplatform
17 import scmposix as scmplatform
18
18
19 systemrcpath = scmplatform.systemrcpath
19 systemrcpath = scmplatform.systemrcpath
20 userrcpath = scmplatform.userrcpath
20 userrcpath = scmplatform.userrcpath
21
21
22 def nochangesfound(ui, repo, excluded=None):
22 def nochangesfound(ui, repo, excluded=None):
23 '''Report no changes for push/pull, excluded is None or a list of
23 '''Report no changes for push/pull, excluded is None or a list of
24 nodes excluded from the push/pull.
24 nodes excluded from the push/pull.
25 '''
25 '''
26 secretlist = []
26 secretlist = []
27 if excluded:
27 if excluded:
28 for n in excluded:
28 for n in excluded:
29 if n not in repo:
29 if n not in repo:
30 # discovery should not have included the filtered revision,
30 # discovery should not have included the filtered revision,
31 # we have to explicitly exclude it until discovery is cleanup.
31 # we have to explicitly exclude it until discovery is cleanup.
32 continue
32 continue
33 ctx = repo[n]
33 ctx = repo[n]
34 if ctx.phase() >= phases.secret and not ctx.extinct():
34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 secretlist.append(n)
35 secretlist.append(n)
36
36
37 if secretlist:
37 if secretlist:
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 % len(secretlist))
39 % len(secretlist))
40 else:
40 else:
41 ui.status(_("no changes found\n"))
41 ui.status(_("no changes found\n"))
42
42
43 def checknewlabel(repo, lbl, kind):
43 def checknewlabel(repo, lbl, kind):
44 if lbl in ['tip', '.', 'null']:
44 if lbl in ['tip', '.', 'null']:
45 raise util.Abort(_("the name '%s' is reserved") % lbl)
45 raise util.Abort(_("the name '%s' is reserved") % lbl)
46 for c in (':', '\0', '\n', '\r'):
46 for c in (':', '\0', '\n', '\r'):
47 if c in lbl:
47 if c in lbl:
48 raise util.Abort(_("%r cannot be used in a name") % c)
48 raise util.Abort(_("%r cannot be used in a name") % c)
49 try:
49 try:
50 int(lbl)
50 int(lbl)
51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
52 except ValueError:
52 except ValueError:
53 pass
53 pass
54
54
55 def checkfilename(f):
55 def checkfilename(f):
56 '''Check that the filename f is an acceptable filename for a tracked file'''
56 '''Check that the filename f is an acceptable filename for a tracked file'''
57 if '\r' in f or '\n' in f:
57 if '\r' in f or '\n' in f:
58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
59
59
60 def checkportable(ui, f):
60 def checkportable(ui, f):
61 '''Check if filename f is portable and warn or abort depending on config'''
61 '''Check if filename f is portable and warn or abort depending on config'''
62 checkfilename(f)
62 checkfilename(f)
63 abort, warn = checkportabilityalert(ui)
63 abort, warn = checkportabilityalert(ui)
64 if abort or warn:
64 if abort or warn:
65 msg = util.checkwinfilename(f)
65 msg = util.checkwinfilename(f)
66 if msg:
66 if msg:
67 msg = "%s: %r" % (msg, f)
67 msg = "%s: %r" % (msg, f)
68 if abort:
68 if abort:
69 raise util.Abort(msg)
69 raise util.Abort(msg)
70 ui.warn(_("warning: %s\n") % msg)
70 ui.warn(_("warning: %s\n") % msg)
71
71
72 def checkportabilityalert(ui):
72 def checkportabilityalert(ui):
73 '''check if the user's config requests nothing, a warning, or abort for
73 '''check if the user's config requests nothing, a warning, or abort for
74 non-portable filenames'''
74 non-portable filenames'''
75 val = ui.config('ui', 'portablefilenames', 'warn')
75 val = ui.config('ui', 'portablefilenames', 'warn')
76 lval = val.lower()
76 lval = val.lower()
77 bval = util.parsebool(val)
77 bval = util.parsebool(val)
78 abort = os.name == 'nt' or lval == 'abort'
78 abort = os.name == 'nt' or lval == 'abort'
79 warn = bval or lval == 'warn'
79 warn = bval or lval == 'warn'
80 if bval is None and not (warn or abort or lval == 'ignore'):
80 if bval is None and not (warn or abort or lval == 'ignore'):
81 raise error.ConfigError(
81 raise error.ConfigError(
82 _("ui.portablefilenames value is invalid ('%s')") % val)
82 _("ui.portablefilenames value is invalid ('%s')") % val)
83 return abort, warn
83 return abort, warn
84
84
85 class casecollisionauditor(object):
85 class casecollisionauditor(object):
86 def __init__(self, ui, abort, dirstate):
86 def __init__(self, ui, abort, dirstate):
87 self._ui = ui
87 self._ui = ui
88 self._abort = abort
88 self._abort = abort
89 allfiles = '\0'.join(dirstate._map)
89 allfiles = '\0'.join(dirstate._map)
90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
91 self._dirstate = dirstate
91 self._dirstate = dirstate
92 # The purpose of _newfiles is so that we don't complain about
92 # The purpose of _newfiles is so that we don't complain about
93 # case collisions if someone were to call this object with the
93 # case collisions if someone were to call this object with the
94 # same filename twice.
94 # same filename twice.
95 self._newfiles = set()
95 self._newfiles = set()
96
96
97 def __call__(self, f):
97 def __call__(self, f):
98 fl = encoding.lower(f)
98 fl = encoding.lower(f)
99 if (fl in self._loweredfiles and f not in self._dirstate and
99 if (fl in self._loweredfiles and f not in self._dirstate and
100 f not in self._newfiles):
100 f not in self._newfiles):
101 msg = _('possible case-folding collision for %s') % f
101 msg = _('possible case-folding collision for %s') % f
102 if self._abort:
102 if self._abort:
103 raise util.Abort(msg)
103 raise util.Abort(msg)
104 self._ui.warn(_("warning: %s\n") % msg)
104 self._ui.warn(_("warning: %s\n") % msg)
105 self._loweredfiles.add(fl)
105 self._loweredfiles.add(fl)
106 self._newfiles.add(f)
106 self._newfiles.add(f)
107
107
108 class pathauditor(object):
108 class pathauditor(object):
109 '''ensure that a filesystem path contains no banned components.
109 '''ensure that a filesystem path contains no banned components.
110 the following properties of a path are checked:
110 the following properties of a path are checked:
111
111
112 - ends with a directory separator
112 - ends with a directory separator
113 - under top-level .hg
113 - under top-level .hg
114 - starts at the root of a windows drive
114 - starts at the root of a windows drive
115 - contains ".."
115 - contains ".."
116 - traverses a symlink (e.g. a/symlink_here/b)
116 - traverses a symlink (e.g. a/symlink_here/b)
117 - inside a nested repository (a callback can be used to approve
117 - inside a nested repository (a callback can be used to approve
118 some nested repositories, e.g., subrepositories)
118 some nested repositories, e.g., subrepositories)
119 '''
119 '''
120
120
121 def __init__(self, root, callback=None):
121 def __init__(self, root, callback=None):
122 self.audited = set()
122 self.audited = set()
123 self.auditeddir = set()
123 self.auditeddir = set()
124 self.root = root
124 self.root = root
125 self.callback = callback
125 self.callback = callback
126 if os.path.lexists(root) and not util.checkcase(root):
126 if os.path.lexists(root) and not util.checkcase(root):
127 self.normcase = util.normcase
127 self.normcase = util.normcase
128 else:
128 else:
129 self.normcase = lambda x: x
129 self.normcase = lambda x: x
130
130
131 def __call__(self, path):
131 def __call__(self, path):
132 '''Check the relative path.
132 '''Check the relative path.
133 path may contain a pattern (e.g. foodir/**.txt)'''
133 path may contain a pattern (e.g. foodir/**.txt)'''
134
134
135 path = util.localpath(path)
135 path = util.localpath(path)
136 normpath = self.normcase(path)
136 normpath = self.normcase(path)
137 if normpath in self.audited:
137 if normpath in self.audited:
138 return
138 return
139 # AIX ignores "/" at end of path, others raise EISDIR.
139 # AIX ignores "/" at end of path, others raise EISDIR.
140 if util.endswithsep(path):
140 if util.endswithsep(path):
141 raise util.Abort(_("path ends in directory separator: %s") % path)
141 raise util.Abort(_("path ends in directory separator: %s") % path)
142 parts = util.splitpath(path)
142 parts = util.splitpath(path)
143 if (os.path.splitdrive(path)[0]
143 if (os.path.splitdrive(path)[0]
144 or parts[0].lower() in ('.hg', '.hg.', '')
144 or parts[0].lower() in ('.hg', '.hg.', '')
145 or os.pardir in parts):
145 or os.pardir in parts):
146 raise util.Abort(_("path contains illegal component: %s") % path)
146 raise util.Abort(_("path contains illegal component: %s") % path)
147 if '.hg' in path.lower():
147 if '.hg' in path.lower():
148 lparts = [p.lower() for p in parts]
148 lparts = [p.lower() for p in parts]
149 for p in '.hg', '.hg.':
149 for p in '.hg', '.hg.':
150 if p in lparts[1:]:
150 if p in lparts[1:]:
151 pos = lparts.index(p)
151 pos = lparts.index(p)
152 base = os.path.join(*parts[:pos])
152 base = os.path.join(*parts[:pos])
153 raise util.Abort(_("path '%s' is inside nested repo %r")
153 raise util.Abort(_("path '%s' is inside nested repo %r")
154 % (path, base))
154 % (path, base))
155
155
156 normparts = util.splitpath(normpath)
156 normparts = util.splitpath(normpath)
157 assert len(parts) == len(normparts)
157 assert len(parts) == len(normparts)
158
158
159 parts.pop()
159 parts.pop()
160 normparts.pop()
160 normparts.pop()
161 prefixes = []
161 prefixes = []
162 while parts:
162 while parts:
163 prefix = os.sep.join(parts)
163 prefix = os.sep.join(parts)
164 normprefix = os.sep.join(normparts)
164 normprefix = os.sep.join(normparts)
165 if normprefix in self.auditeddir:
165 if normprefix in self.auditeddir:
166 break
166 break
167 curpath = os.path.join(self.root, prefix)
167 curpath = os.path.join(self.root, prefix)
168 try:
168 try:
169 st = os.lstat(curpath)
169 st = os.lstat(curpath)
170 except OSError, err:
170 except OSError, err:
171 # EINVAL can be raised as invalid path syntax under win32.
171 # EINVAL can be raised as invalid path syntax under win32.
172 # They must be ignored for patterns can be checked too.
172 # They must be ignored for patterns can be checked too.
173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
174 raise
174 raise
175 else:
175 else:
176 if stat.S_ISLNK(st.st_mode):
176 if stat.S_ISLNK(st.st_mode):
177 raise util.Abort(
177 raise util.Abort(
178 _('path %r traverses symbolic link %r')
178 _('path %r traverses symbolic link %r')
179 % (path, prefix))
179 % (path, prefix))
180 elif (stat.S_ISDIR(st.st_mode) and
180 elif (stat.S_ISDIR(st.st_mode) and
181 os.path.isdir(os.path.join(curpath, '.hg'))):
181 os.path.isdir(os.path.join(curpath, '.hg'))):
182 if not self.callback or not self.callback(curpath):
182 if not self.callback or not self.callback(curpath):
183 raise util.Abort(_("path '%s' is inside nested "
183 raise util.Abort(_("path '%s' is inside nested "
184 "repo %r")
184 "repo %r")
185 % (path, prefix))
185 % (path, prefix))
186 prefixes.append(normprefix)
186 prefixes.append(normprefix)
187 parts.pop()
187 parts.pop()
188 normparts.pop()
188 normparts.pop()
189
189
190 self.audited.add(normpath)
190 self.audited.add(normpath)
191 # only add prefixes to the cache after checking everything: we don't
191 # only add prefixes to the cache after checking everything: we don't
192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
193 self.auditeddir.update(prefixes)
193 self.auditeddir.update(prefixes)
194
194
195 def check(self, path):
195 def check(self, path):
196 try:
196 try:
197 self(path)
197 self(path)
198 return True
198 return True
199 except (OSError, util.Abort):
199 except (OSError, util.Abort):
200 return False
200 return False
201
201
202 class abstractvfs(object):
202 class abstractvfs(object):
203 """Abstract base class; cannot be instantiated"""
203 """Abstract base class; cannot be instantiated"""
204
204
205 def __init__(self, *args, **kwargs):
205 def __init__(self, *args, **kwargs):
206 '''Prevent instantiation; don't call this from subclasses.'''
206 '''Prevent instantiation; don't call this from subclasses.'''
207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
208
208
209 def tryread(self, path):
209 def tryread(self, path):
210 '''gracefully return an empty string for missing files'''
210 '''gracefully return an empty string for missing files'''
211 try:
211 try:
212 return self.read(path)
212 return self.read(path)
213 except IOError, inst:
213 except IOError, inst:
214 if inst.errno != errno.ENOENT:
214 if inst.errno != errno.ENOENT:
215 raise
215 raise
216 return ""
216 return ""
217
217
218 def read(self, path):
218 def read(self, path):
219 fp = self(path, 'rb')
219 fp = self(path, 'rb')
220 try:
220 try:
221 return fp.read()
221 return fp.read()
222 finally:
222 finally:
223 fp.close()
223 fp.close()
224
224
225 def write(self, path, data):
225 def write(self, path, data):
226 fp = self(path, 'wb')
226 fp = self(path, 'wb')
227 try:
227 try:
228 return fp.write(data)
228 return fp.write(data)
229 finally:
229 finally:
230 fp.close()
230 fp.close()
231
231
232 def append(self, path, data):
232 def append(self, path, data):
233 fp = self(path, 'ab')
233 fp = self(path, 'ab')
234 try:
234 try:
235 return fp.write(data)
235 return fp.write(data)
236 finally:
236 finally:
237 fp.close()
237 fp.close()
238
238
239 def exists(self, path=None):
239 def exists(self, path=None):
240 return os.path.exists(self.join(path))
240 return os.path.exists(self.join(path))
241
241
242 def isdir(self, path=None):
242 def isdir(self, path=None):
243 return os.path.isdir(self.join(path))
243 return os.path.isdir(self.join(path))
244
244
245 def makedir(self, path=None, notindexed=True):
245 def makedir(self, path=None, notindexed=True):
246 return util.makedir(self.join(path), notindexed)
246 return util.makedir(self.join(path), notindexed)
247
247
248 def makedirs(self, path=None, mode=None):
248 def makedirs(self, path=None, mode=None):
249 return util.makedirs(self.join(path), mode)
249 return util.makedirs(self.join(path), mode)
250
250
251 def mkdir(self, path=None):
251 def mkdir(self, path=None):
252 return os.mkdir(self.join(path))
252 return os.mkdir(self.join(path))
253
253
254 def readdir(self, path=None, stat=None, skip=None):
254 def readdir(self, path=None, stat=None, skip=None):
255 return osutil.listdir(self.join(path), stat, skip)
255 return osutil.listdir(self.join(path), stat, skip)
256
256
257 def stat(self, path=None):
257 def stat(self, path=None):
258 return os.stat(self.join(path))
258 return os.stat(self.join(path))
259
259
260 class vfs(abstractvfs):
260 class vfs(abstractvfs):
261 '''Operate files relative to a base directory
261 '''Operate files relative to a base directory
262
262
263 This class is used to hide the details of COW semantics and
263 This class is used to hide the details of COW semantics and
264 remote file access from higher level code.
264 remote file access from higher level code.
265 '''
265 '''
266 def __init__(self, base, audit=True, expand=False):
266 def __init__(self, base, audit=True, expand=False):
267 if expand:
267 if expand:
268 base = os.path.realpath(util.expandpath(base))
268 base = os.path.realpath(util.expandpath(base))
269 self.base = base
269 self.base = base
270 self._setmustaudit(audit)
270 self._setmustaudit(audit)
271 self.createmode = None
271 self.createmode = None
272 self._trustnlink = None
272 self._trustnlink = None
273
273
274 def _getmustaudit(self):
274 def _getmustaudit(self):
275 return self._audit
275 return self._audit
276
276
277 def _setmustaudit(self, onoff):
277 def _setmustaudit(self, onoff):
278 self._audit = onoff
278 self._audit = onoff
279 if onoff:
279 if onoff:
280 self.audit = pathauditor(self.base)
280 self.audit = pathauditor(self.base)
281 else:
281 else:
282 self.audit = util.always
282 self.audit = util.always
283
283
284 mustaudit = property(_getmustaudit, _setmustaudit)
284 mustaudit = property(_getmustaudit, _setmustaudit)
285
285
286 @util.propertycache
286 @util.propertycache
287 def _cansymlink(self):
287 def _cansymlink(self):
288 return util.checklink(self.base)
288 return util.checklink(self.base)
289
289
290 @util.propertycache
290 @util.propertycache
291 def _chmod(self):
291 def _chmod(self):
292 return util.checkexec(self.base)
292 return util.checkexec(self.base)
293
293
294 def _fixfilemode(self, name):
294 def _fixfilemode(self, name):
295 if self.createmode is None or not self._chmod:
295 if self.createmode is None or not self._chmod:
296 return
296 return
297 os.chmod(name, self.createmode & 0666)
297 os.chmod(name, self.createmode & 0666)
298
298
299 def __call__(self, path, mode="r", text=False, atomictemp=False):
299 def __call__(self, path, mode="r", text=False, atomictemp=False):
300 if self._audit:
300 if self._audit:
301 r = util.checkosfilename(path)
301 r = util.checkosfilename(path)
302 if r:
302 if r:
303 raise util.Abort("%s: %r" % (r, path))
303 raise util.Abort("%s: %r" % (r, path))
304 self.audit(path)
304 self.audit(path)
305 f = self.join(path)
305 f = self.join(path)
306
306
307 if not text and "b" not in mode:
307 if not text and "b" not in mode:
308 mode += "b" # for that other OS
308 mode += "b" # for that other OS
309
309
310 nlink = -1
310 nlink = -1
311 if mode not in ('r', 'rb'):
311 if mode not in ('r', 'rb'):
312 dirname, basename = util.split(f)
312 dirname, basename = util.split(f)
313 # If basename is empty, then the path is malformed because it points
313 # If basename is empty, then the path is malformed because it points
314 # to a directory. Let the posixfile() call below raise IOError.
314 # to a directory. Let the posixfile() call below raise IOError.
315 if basename:
315 if basename:
316 if atomictemp:
316 if atomictemp:
317 util.ensuredirs(dirname, self.createmode)
317 util.ensuredirs(dirname, self.createmode)
318 return util.atomictempfile(f, mode, self.createmode)
318 return util.atomictempfile(f, mode, self.createmode)
319 try:
319 try:
320 if 'w' in mode:
320 if 'w' in mode:
321 util.unlink(f)
321 util.unlink(f)
322 nlink = 0
322 nlink = 0
323 else:
323 else:
324 # nlinks() may behave differently for files on Windows
324 # nlinks() may behave differently for files on Windows
325 # shares if the file is open.
325 # shares if the file is open.
326 fd = util.posixfile(f)
326 fd = util.posixfile(f)
327 nlink = util.nlinks(f)
327 nlink = util.nlinks(f)
328 if nlink < 1:
328 if nlink < 1:
329 nlink = 2 # force mktempcopy (issue1922)
329 nlink = 2 # force mktempcopy (issue1922)
330 fd.close()
330 fd.close()
331 except (OSError, IOError), e:
331 except (OSError, IOError), e:
332 if e.errno != errno.ENOENT:
332 if e.errno != errno.ENOENT:
333 raise
333 raise
334 nlink = 0
334 nlink = 0
335 util.ensuredirs(dirname, self.createmode)
335 util.ensuredirs(dirname, self.createmode)
336 if nlink > 0:
336 if nlink > 0:
337 if self._trustnlink is None:
337 if self._trustnlink is None:
338 self._trustnlink = nlink > 1 or util.checknlink(f)
338 self._trustnlink = nlink > 1 or util.checknlink(f)
339 if nlink > 1 or not self._trustnlink:
339 if nlink > 1 or not self._trustnlink:
340 util.rename(util.mktempcopy(f), f)
340 util.rename(util.mktempcopy(f), f)
341 fp = util.posixfile(f, mode)
341 fp = util.posixfile(f, mode)
342 if nlink == 0:
342 if nlink == 0:
343 self._fixfilemode(f)
343 self._fixfilemode(f)
344 return fp
344 return fp
345
345
346 def symlink(self, src, dst):
346 def symlink(self, src, dst):
347 self.audit(dst)
347 self.audit(dst)
348 linkname = self.join(dst)
348 linkname = self.join(dst)
349 try:
349 try:
350 os.unlink(linkname)
350 os.unlink(linkname)
351 except OSError:
351 except OSError:
352 pass
352 pass
353
353
354 util.ensuredirs(os.path.dirname(linkname), self.createmode)
354 util.ensuredirs(os.path.dirname(linkname), self.createmode)
355
355
356 if self._cansymlink:
356 if self._cansymlink:
357 try:
357 try:
358 os.symlink(src, linkname)
358 os.symlink(src, linkname)
359 except OSError, err:
359 except OSError, err:
360 raise OSError(err.errno, _('could not symlink to %r: %s') %
360 raise OSError(err.errno, _('could not symlink to %r: %s') %
361 (src, err.strerror), linkname)
361 (src, err.strerror), linkname)
362 else:
362 else:
363 self.write(dst, src)
363 self.write(dst, src)
364
364
365 def join(self, path):
365 def join(self, path):
366 if path:
366 if path:
367 return os.path.join(self.base, path)
367 return os.path.join(self.base, path)
368 else:
368 else:
369 return self.base
369 return self.base
370
370
371 opener = vfs
371 opener = vfs
372
372
373 class auditvfs(object):
373 class auditvfs(object):
374 def __init__(self, vfs):
374 def __init__(self, vfs):
375 self.vfs = vfs
375 self.vfs = vfs
376
376
377 def _getmustaudit(self):
377 def _getmustaudit(self):
378 return self.vfs.mustaudit
378 return self.vfs.mustaudit
379
379
380 def _setmustaudit(self, onoff):
380 def _setmustaudit(self, onoff):
381 self.vfs.mustaudit = onoff
381 self.vfs.mustaudit = onoff
382
382
383 mustaudit = property(_getmustaudit, _setmustaudit)
383 mustaudit = property(_getmustaudit, _setmustaudit)
384
384
385 class filtervfs(abstractvfs, auditvfs):
385 class filtervfs(abstractvfs, auditvfs):
386 '''Wrapper vfs for filtering filenames with a function.'''
386 '''Wrapper vfs for filtering filenames with a function.'''
387
387
388 def __init__(self, vfs, filter):
388 def __init__(self, vfs, filter):
389 auditvfs.__init__(self, vfs)
389 auditvfs.__init__(self, vfs)
390 self._filter = filter
390 self._filter = filter
391
391
392 def __call__(self, path, *args, **kwargs):
392 def __call__(self, path, *args, **kwargs):
393 return self.vfs(self._filter(path), *args, **kwargs)
393 return self.vfs(self._filter(path), *args, **kwargs)
394
394
395 def join(self, path):
395 def join(self, path):
396 if path:
396 if path:
397 return self.vfs.join(self._filter(path))
397 return self.vfs.join(self._filter(path))
398 else:
398 else:
399 return self.vfs.join(path)
399 return self.vfs.join(path)
400
400
401 filteropener = filtervfs
401 filteropener = filtervfs
402
402
403 class readonlyvfs(abstractvfs, auditvfs):
403 class readonlyvfs(abstractvfs, auditvfs):
404 '''Wrapper vfs preventing any writing.'''
404 '''Wrapper vfs preventing any writing.'''
405
405
406 def __init__(self, vfs):
406 def __init__(self, vfs):
407 auditvfs.__init__(self, vfs)
407 auditvfs.__init__(self, vfs)
408
408
409 def __call__(self, path, mode='r', *args, **kw):
409 def __call__(self, path, mode='r', *args, **kw):
410 if mode not in ('r', 'rb'):
410 if mode not in ('r', 'rb'):
411 raise util.Abort('this vfs is read only')
411 raise util.Abort('this vfs is read only')
412 return self.vfs(path, mode, *args, **kw)
412 return self.vfs(path, mode, *args, **kw)
413
413
414
414
415 def canonpath(root, cwd, myname, auditor=None):
415 def canonpath(root, cwd, myname, auditor=None):
416 '''return the canonical path of myname, given cwd and root'''
416 '''return the canonical path of myname, given cwd and root'''
417 if util.endswithsep(root):
417 if util.endswithsep(root):
418 rootsep = root
418 rootsep = root
419 else:
419 else:
420 rootsep = root + os.sep
420 rootsep = root + os.sep
421 name = myname
421 name = myname
422 if not os.path.isabs(name):
422 if not os.path.isabs(name):
423 name = os.path.join(root, cwd, name)
423 name = os.path.join(root, cwd, name)
424 name = os.path.normpath(name)
424 name = os.path.normpath(name)
425 if auditor is None:
425 if auditor is None:
426 auditor = pathauditor(root)
426 auditor = pathauditor(root)
427 if name != rootsep and name.startswith(rootsep):
427 if name != rootsep and name.startswith(rootsep):
428 name = name[len(rootsep):]
428 name = name[len(rootsep):]
429 auditor(name)
429 auditor(name)
430 return util.pconvert(name)
430 return util.pconvert(name)
431 elif name == root:
431 elif name == root:
432 return ''
432 return ''
433 else:
433 else:
434 # Determine whether `name' is in the hierarchy at or beneath `root',
434 # Determine whether `name' is in the hierarchy at or beneath `root',
435 # by iterating name=dirname(name) until that causes no change (can't
435 # by iterating name=dirname(name) until that causes no change (can't
436 # check name == '/', because that doesn't work on windows). The list
436 # check name == '/', because that doesn't work on windows). The list
437 # `rel' holds the reversed list of components making up the relative
437 # `rel' holds the reversed list of components making up the relative
438 # file name we want.
438 # file name we want.
439 rel = []
439 rel = []
440 while True:
440 while True:
441 try:
441 try:
442 s = util.samefile(name, root)
442 s = util.samefile(name, root)
443 except OSError:
443 except OSError:
444 s = False
444 s = False
445 if s:
445 if s:
446 if not rel:
446 if not rel:
447 # name was actually the same as root (maybe a symlink)
447 # name was actually the same as root (maybe a symlink)
448 return ''
448 return ''
449 rel.reverse()
449 rel.reverse()
450 name = os.path.join(*rel)
450 name = os.path.join(*rel)
451 auditor(name)
451 auditor(name)
452 return util.pconvert(name)
452 return util.pconvert(name)
453 dirname, basename = util.split(name)
453 dirname, basename = util.split(name)
454 rel.append(basename)
454 rel.append(basename)
455 if dirname == name:
455 if dirname == name:
456 break
456 break
457 name = dirname
457 name = dirname
458
458
459 raise util.Abort(_("%s not under root '%s'") % (myname, root))
459 raise util.Abort(_("%s not under root '%s'") % (myname, root))
460
460
461 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
461 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
462 '''yield every hg repository under path, always recursively.
462 '''yield every hg repository under path, always recursively.
463 The recurse flag will only control recursion into repo working dirs'''
463 The recurse flag will only control recursion into repo working dirs'''
464 def errhandler(err):
464 def errhandler(err):
465 if err.filename == path:
465 if err.filename == path:
466 raise err
466 raise err
467 samestat = getattr(os.path, 'samestat', None)
467 samestat = getattr(os.path, 'samestat', None)
468 if followsym and samestat is not None:
468 if followsym and samestat is not None:
469 def adddir(dirlst, dirname):
469 def adddir(dirlst, dirname):
470 match = False
470 match = False
471 dirstat = os.stat(dirname)
471 dirstat = os.stat(dirname)
472 for lstdirstat in dirlst:
472 for lstdirstat in dirlst:
473 if samestat(dirstat, lstdirstat):
473 if samestat(dirstat, lstdirstat):
474 match = True
474 match = True
475 break
475 break
476 if not match:
476 if not match:
477 dirlst.append(dirstat)
477 dirlst.append(dirstat)
478 return not match
478 return not match
479 else:
479 else:
480 followsym = False
480 followsym = False
481
481
482 if (seen_dirs is None) and followsym:
482 if (seen_dirs is None) and followsym:
483 seen_dirs = []
483 seen_dirs = []
484 adddir(seen_dirs, path)
484 adddir(seen_dirs, path)
485 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
485 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
486 dirs.sort()
486 dirs.sort()
487 if '.hg' in dirs:
487 if '.hg' in dirs:
488 yield root # found a repository
488 yield root # found a repository
489 qroot = os.path.join(root, '.hg', 'patches')
489 qroot = os.path.join(root, '.hg', 'patches')
490 if os.path.isdir(os.path.join(qroot, '.hg')):
490 if os.path.isdir(os.path.join(qroot, '.hg')):
491 yield qroot # we have a patch queue repo here
491 yield qroot # we have a patch queue repo here
492 if recurse:
492 if recurse:
493 # avoid recursing inside the .hg directory
493 # avoid recursing inside the .hg directory
494 dirs.remove('.hg')
494 dirs.remove('.hg')
495 else:
495 else:
496 dirs[:] = [] # don't descend further
496 dirs[:] = [] # don't descend further
497 elif followsym:
497 elif followsym:
498 newdirs = []
498 newdirs = []
499 for d in dirs:
499 for d in dirs:
500 fname = os.path.join(root, d)
500 fname = os.path.join(root, d)
501 if adddir(seen_dirs, fname):
501 if adddir(seen_dirs, fname):
502 if os.path.islink(fname):
502 if os.path.islink(fname):
503 for hgname in walkrepos(fname, True, seen_dirs):
503 for hgname in walkrepos(fname, True, seen_dirs):
504 yield hgname
504 yield hgname
505 else:
505 else:
506 newdirs.append(d)
506 newdirs.append(d)
507 dirs[:] = newdirs
507 dirs[:] = newdirs
508
508
509 def osrcpath():
509 def osrcpath():
510 '''return default os-specific hgrc search path'''
510 '''return default os-specific hgrc search path'''
511 path = systemrcpath()
511 path = systemrcpath()
512 path.extend(userrcpath())
512 path.extend(userrcpath())
513 path = [os.path.normpath(f) for f in path]
513 path = [os.path.normpath(f) for f in path]
514 return path
514 return path
515
515
516 _rcpath = None
516 _rcpath = None
517
517
518 def rcpath():
518 def rcpath():
519 '''return hgrc search path. if env var HGRCPATH is set, use it.
519 '''return hgrc search path. if env var HGRCPATH is set, use it.
520 for each item in path, if directory, use files ending in .rc,
520 for each item in path, if directory, use files ending in .rc,
521 else use item.
521 else use item.
522 make HGRCPATH empty to only look in .hg/hgrc of current repo.
522 make HGRCPATH empty to only look in .hg/hgrc of current repo.
523 if no HGRCPATH, use default os-specific path.'''
523 if no HGRCPATH, use default os-specific path.'''
524 global _rcpath
524 global _rcpath
525 if _rcpath is None:
525 if _rcpath is None:
526 if 'HGRCPATH' in os.environ:
526 if 'HGRCPATH' in os.environ:
527 _rcpath = []
527 _rcpath = []
528 for p in os.environ['HGRCPATH'].split(os.pathsep):
528 for p in os.environ['HGRCPATH'].split(os.pathsep):
529 if not p:
529 if not p:
530 continue
530 continue
531 p = util.expandpath(p)
531 p = util.expandpath(p)
532 if os.path.isdir(p):
532 if os.path.isdir(p):
533 for f, kind in osutil.listdir(p):
533 for f, kind in osutil.listdir(p):
534 if f.endswith('.rc'):
534 if f.endswith('.rc'):
535 _rcpath.append(os.path.join(p, f))
535 _rcpath.append(os.path.join(p, f))
536 else:
536 else:
537 _rcpath.append(p)
537 _rcpath.append(p)
538 else:
538 else:
539 _rcpath = osrcpath()
539 _rcpath = osrcpath()
540 return _rcpath
540 return _rcpath
541
541
542 def revsingle(repo, revspec, default='.'):
542 def revsingle(repo, revspec, default='.'):
543 if not revspec:
543 if not revspec:
544 return repo[default]
544 return repo[default]
545
545
546 l = revrange(repo, [revspec])
546 l = revrange(repo, [revspec])
547 if len(l) < 1:
547 if len(l) < 1:
548 raise util.Abort(_('empty revision set'))
548 raise util.Abort(_('empty revision set'))
549 return repo[l[-1]]
549 return repo[l[-1]]
550
550
551 def revpair(repo, revs):
551 def revpair(repo, revs):
552 if not revs:
552 if not revs:
553 return repo.dirstate.p1(), None
553 return repo.dirstate.p1(), None
554
554
555 l = revrange(repo, revs)
555 l = revrange(repo, revs)
556
556
557 if len(l) == 0:
557 if len(l) == 0:
558 if revs:
558 if revs:
559 raise util.Abort(_('empty revision range'))
559 raise util.Abort(_('empty revision range'))
560 return repo.dirstate.p1(), None
560 return repo.dirstate.p1(), None
561
561
562 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
562 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
563 return repo.lookup(l[0]), None
563 return repo.lookup(l[0]), None
564
564
565 return repo.lookup(l[0]), repo.lookup(l[-1])
565 return repo.lookup(l[0]), repo.lookup(l[-1])
566
566
567 _revrangesep = ':'
567 _revrangesep = ':'
568
568
569 def revrange(repo, revs):
569 def revrange(repo, revs):
570 """Yield revision as strings from a list of revision specifications."""
570 """Yield revision as strings from a list of revision specifications."""
571
571
572 def revfix(repo, val, defval):
572 def revfix(repo, val, defval):
573 if not val and val != 0 and defval is not None:
573 if not val and val != 0 and defval is not None:
574 return defval
574 return defval
575 return repo[val].rev()
575 return repo[val].rev()
576
576
577 seen, l = set(), []
577 seen, l = set(), []
578 for spec in revs:
578 for spec in revs:
579 if l and not seen:
579 if l and not seen:
580 seen = set(l)
580 seen = set(l)
581 # attempt to parse old-style ranges first to deal with
581 # attempt to parse old-style ranges first to deal with
582 # things like old-tag which contain query metacharacters
582 # things like old-tag which contain query metacharacters
583 try:
583 try:
584 if isinstance(spec, int):
584 if isinstance(spec, int):
585 seen.add(spec)
585 seen.add(spec)
586 l.append(spec)
586 l.append(spec)
587 continue
587 continue
588
588
589 if _revrangesep in spec:
589 if _revrangesep in spec:
590 start, end = spec.split(_revrangesep, 1)
590 start, end = spec.split(_revrangesep, 1)
591 start = revfix(repo, start, 0)
591 start = revfix(repo, start, 0)
592 end = revfix(repo, end, len(repo) - 1)
592 end = revfix(repo, end, len(repo) - 1)
593 if end == nullrev and start <= 0:
593 if end == nullrev and start <= 0:
594 start = nullrev
594 start = nullrev
595 rangeiter = repo.changelog.revs(start, end)
595 rangeiter = repo.changelog.revs(start, end)
596 if not seen and not l:
596 if not seen and not l:
597 # by far the most common case: revs = ["-1:0"]
597 # by far the most common case: revs = ["-1:0"]
598 l = list(rangeiter)
598 l = list(rangeiter)
599 # defer syncing seen until next iteration
599 # defer syncing seen until next iteration
600 continue
600 continue
601 newrevs = set(rangeiter)
601 newrevs = set(rangeiter)
602 if seen:
602 if seen:
603 newrevs.difference_update(seen)
603 newrevs.difference_update(seen)
604 seen.update(newrevs)
604 seen.update(newrevs)
605 else:
605 else:
606 seen = newrevs
606 seen = newrevs
607 l.extend(sorted(newrevs, reverse=start > end))
607 l.extend(sorted(newrevs, reverse=start > end))
608 continue
608 continue
609 elif spec and spec in repo: # single unquoted rev
609 elif spec and spec in repo: # single unquoted rev
610 rev = revfix(repo, spec, None)
610 rev = revfix(repo, spec, None)
611 if rev in seen:
611 if rev in seen:
612 continue
612 continue
613 seen.add(rev)
613 seen.add(rev)
614 l.append(rev)
614 l.append(rev)
615 continue
615 continue
616 except error.RepoLookupError:
616 except error.RepoLookupError:
617 pass
617 pass
618
618
619 # fall through to new-style queries if old-style fails
619 # fall through to new-style queries if old-style fails
620 m = revset.match(repo.ui, spec)
620 m = revset.match(repo.ui, spec)
621 dl = [r for r in m(repo, list(repo)) if r not in seen]
621 dl = [r for r in m(repo, list(repo)) if r not in seen]
622 l.extend(dl)
622 l.extend(dl)
623 seen.update(dl)
623 seen.update(dl)
624
624
625 return l
625 return l
626
626
627 def expandpats(pats):
627 def expandpats(pats):
628 if not util.expandglobs:
628 if not util.expandglobs:
629 return list(pats)
629 return list(pats)
630 ret = []
630 ret = []
631 for p in pats:
631 for p in pats:
632 kind, name = matchmod._patsplit(p, None)
632 kind, name = matchmod._patsplit(p, None)
633 if kind is None:
633 if kind is None:
634 try:
634 try:
635 globbed = glob.glob(name)
635 globbed = glob.glob(name)
636 except re.error:
636 except re.error:
637 globbed = [name]
637 globbed = [name]
638 if globbed:
638 if globbed:
639 ret.extend(globbed)
639 ret.extend(globbed)
640 continue
640 continue
641 ret.append(p)
641 ret.append(p)
642 return ret
642 return ret
643
643
644 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
644 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
645 if pats == ("",):
645 if pats == ("",):
646 pats = []
646 pats = []
647 if not globbed and default == 'relpath':
647 if not globbed and default == 'relpath':
648 pats = expandpats(pats or [])
648 pats = expandpats(pats or [])
649
649
650 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
650 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
651 default)
651 default)
652 def badfn(f, msg):
652 def badfn(f, msg):
653 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
653 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
654 m.bad = badfn
654 m.bad = badfn
655 return m, pats
655 return m, pats
656
656
657 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
657 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
658 return matchandpats(ctx, pats, opts, globbed, default)[0]
658 return matchandpats(ctx, pats, opts, globbed, default)[0]
659
659
660 def matchall(repo):
660 def matchall(repo):
661 return matchmod.always(repo.root, repo.getcwd())
661 return matchmod.always(repo.root, repo.getcwd())
662
662
663 def matchfiles(repo, files):
663 def matchfiles(repo, files):
664 return matchmod.exact(repo.root, repo.getcwd(), files)
664 return matchmod.exact(repo.root, repo.getcwd(), files)
665
665
666 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
666 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
667 if dry_run is None:
667 if dry_run is None:
668 dry_run = opts.get('dry_run')
668 dry_run = opts.get('dry_run')
669 if similarity is None:
669 if similarity is None:
670 similarity = float(opts.get('similarity') or 0)
670 similarity = float(opts.get('similarity') or 0)
671 # we'd use status here, except handling of symlinks and ignore is tricky
671 # we'd use status here, except handling of symlinks and ignore is tricky
672 added, unknown, deleted, removed = [], [], [], []
672 added, unknown, deleted, removed = [], [], [], []
673 audit_path = pathauditor(repo.root)
673 audit_path = pathauditor(repo.root)
674 m = match(repo[None], pats, opts)
674 m = match(repo[None], pats, opts)
675 rejected = []
675 rejected = []
676 m.bad = lambda x, y: rejected.append(x)
676 m.bad = lambda x, y: rejected.append(x)
677
677
678 ctx = repo[None]
678 ctx = repo[None]
679 dirstate = repo.dirstate
679 dirstate = repo.dirstate
680 walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
680 walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
681 - for abs in walkresults:
681 + for abs, st in walkresults.iteritems():
682 - st = walkresults[abs]
683 dstate = dirstate[abs]
682 dstate = dirstate[abs]
684 if dstate == '?' and audit_path.check(abs):
683 if dstate == '?' and audit_path.check(abs):
685 unknown.append(abs)
684 unknown.append(abs)
686 elif dstate != 'r' and not st:
685 elif dstate != 'r' and not st:
687 deleted.append(abs)
686 deleted.append(abs)
688 # for finding renames
687 # for finding renames
689 elif dstate == 'r':
688 elif dstate == 'r':
690 removed.append(abs)
689 removed.append(abs)
691 elif dstate == 'a':
690 elif dstate == 'a':
692 added.append(abs)
691 added.append(abs)
693
692
694 unknownset = set(unknown)
693 unknownset = set(unknown)
695 toprint = unknownset.copy()
694 toprint = unknownset.copy()
696 toprint.update(deleted)
695 toprint.update(deleted)
697 for abs in sorted(toprint):
696 for abs in sorted(toprint):
698 if repo.ui.verbose or not m.exact(abs):
697 if repo.ui.verbose or not m.exact(abs):
699 rel = m.rel(abs)
698 rel = m.rel(abs)
700 if abs in unknownset:
699 if abs in unknownset:
701 status = _('adding %s\n') % ((pats and rel) or abs)
700 status = _('adding %s\n') % ((pats and rel) or abs)
702 else:
701 else:
703 status = _('removing %s\n') % ((pats and rel) or abs)
702 status = _('removing %s\n') % ((pats and rel) or abs)
704 repo.ui.status(status)
703 repo.ui.status(status)
705
704
706 copies = {}
705 copies = {}
707 if similarity > 0:
706 if similarity > 0:
708 for old, new, score in similar.findrenames(repo,
707 for old, new, score in similar.findrenames(repo,
709 added + unknown, removed + deleted, similarity):
708 added + unknown, removed + deleted, similarity):
710 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
709 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
711 repo.ui.status(_('recording removal of %s as rename to %s '
710 repo.ui.status(_('recording removal of %s as rename to %s '
712 '(%d%% similar)\n') %
711 '(%d%% similar)\n') %
713 (m.rel(old), m.rel(new), score * 100))
712 (m.rel(old), m.rel(new), score * 100))
714 copies[new] = old
713 copies[new] = old
715
714
716 if not dry_run:
715 if not dry_run:
717 wctx = repo[None]
716 wctx = repo[None]
718 wlock = repo.wlock()
717 wlock = repo.wlock()
719 try:
718 try:
720 wctx.forget(deleted)
719 wctx.forget(deleted)
721 wctx.add(unknown)
720 wctx.add(unknown)
722 for new, old in copies.iteritems():
721 for new, old in copies.iteritems():
723 wctx.copy(old, new)
722 wctx.copy(old, new)
724 finally:
723 finally:
725 wlock.release()
724 wlock.release()
726
725
727 for f in rejected:
726 for f in rejected:
728 if f in m.files():
727 if f in m.files():
729 return 1
728 return 1
730 return 0
729 return 0
731
730
732 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
731 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
733 """Update the dirstate to reflect the intent of copying src to dst. For
732 """Update the dirstate to reflect the intent of copying src to dst. For
734 different reasons it might not end with dst being marked as copied from src.
733 different reasons it might not end with dst being marked as copied from src.
735 """
734 """
736 origsrc = repo.dirstate.copied(src) or src
735 origsrc = repo.dirstate.copied(src) or src
737 if dst == origsrc: # copying back a copy?
736 if dst == origsrc: # copying back a copy?
738 if repo.dirstate[dst] not in 'mn' and not dryrun:
737 if repo.dirstate[dst] not in 'mn' and not dryrun:
739 repo.dirstate.normallookup(dst)
738 repo.dirstate.normallookup(dst)
740 else:
739 else:
741 if repo.dirstate[origsrc] == 'a' and origsrc == src:
740 if repo.dirstate[origsrc] == 'a' and origsrc == src:
742 if not ui.quiet:
741 if not ui.quiet:
743 ui.warn(_("%s has not been committed yet, so no copy "
742 ui.warn(_("%s has not been committed yet, so no copy "
744 "data will be stored for %s.\n")
743 "data will be stored for %s.\n")
745 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
744 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
746 if repo.dirstate[dst] in '?r' and not dryrun:
745 if repo.dirstate[dst] in '?r' and not dryrun:
747 wctx.add([dst])
746 wctx.add([dst])
748 elif not dryrun:
747 elif not dryrun:
749 wctx.copy(origsrc, dst)
748 wctx.copy(origsrc, dst)
750
749
751 def readrequires(opener, supported):
750 def readrequires(opener, supported):
752 '''Reads and parses .hg/requires and checks if all entries found
751 '''Reads and parses .hg/requires and checks if all entries found
753 are in the list of supported features.'''
752 are in the list of supported features.'''
754 requirements = set(opener.read("requires").splitlines())
753 requirements = set(opener.read("requires").splitlines())
755 missings = []
754 missings = []
756 for r in requirements:
755 for r in requirements:
757 if r not in supported:
756 if r not in supported:
758 if not r or not r[0].isalnum():
757 if not r or not r[0].isalnum():
759 raise error.RequirementError(_(".hg/requires file is corrupt"))
758 raise error.RequirementError(_(".hg/requires file is corrupt"))
760 missings.append(r)
759 missings.append(r)
761 missings.sort()
760 missings.sort()
762 if missings:
761 if missings:
763 raise error.RequirementError(
762 raise error.RequirementError(
764 _("unknown repository format: requires features '%s' (upgrade "
763 _("unknown repository format: requires features '%s' (upgrade "
765 "Mercurial)") % "', '".join(missings))
764 "Mercurial)") % "', '".join(missings))
766 return requirements
765 return requirements
767
766
768 class filecacheentry(object):
767 class filecacheentry(object):
769 def __init__(self, path, stat=True):
768 def __init__(self, path, stat=True):
770 self.path = path
769 self.path = path
771 self.cachestat = None
770 self.cachestat = None
772 self._cacheable = None
771 self._cacheable = None
773
772
774 if stat:
773 if stat:
775 self.cachestat = filecacheentry.stat(self.path)
774 self.cachestat = filecacheentry.stat(self.path)
776
775
777 if self.cachestat:
776 if self.cachestat:
778 self._cacheable = self.cachestat.cacheable()
777 self._cacheable = self.cachestat.cacheable()
779 else:
778 else:
780 # None means we don't know yet
779 # None means we don't know yet
781 self._cacheable = None
780 self._cacheable = None
782
781
783 def refresh(self):
782 def refresh(self):
784 if self.cacheable():
783 if self.cacheable():
785 self.cachestat = filecacheentry.stat(self.path)
784 self.cachestat = filecacheentry.stat(self.path)
786
785
787 def cacheable(self):
786 def cacheable(self):
788 if self._cacheable is not None:
787 if self._cacheable is not None:
789 return self._cacheable
788 return self._cacheable
790
789
791 # we don't know yet, assume it is for now
790 # we don't know yet, assume it is for now
792 return True
791 return True
793
792
794 def changed(self):
793 def changed(self):
795 # no point in going further if we can't cache it
794 # no point in going further if we can't cache it
796 if not self.cacheable():
795 if not self.cacheable():
797 return True
796 return True
798
797
799 newstat = filecacheentry.stat(self.path)
798 newstat = filecacheentry.stat(self.path)
800
799
801 # we may not know if it's cacheable yet, check again now
800 # we may not know if it's cacheable yet, check again now
802 if newstat and self._cacheable is None:
801 if newstat and self._cacheable is None:
803 self._cacheable = newstat.cacheable()
802 self._cacheable = newstat.cacheable()
804
803
805 # check again
804 # check again
806 if not self._cacheable:
805 if not self._cacheable:
807 return True
806 return True
808
807
809 if self.cachestat != newstat:
808 if self.cachestat != newstat:
810 self.cachestat = newstat
809 self.cachestat = newstat
811 return True
810 return True
812 else:
811 else:
813 return False
812 return False
814
813
815 @staticmethod
814 @staticmethod
816 def stat(path):
815 def stat(path):
817 try:
816 try:
818 return util.cachestat(path)
817 return util.cachestat(path)
819 except OSError, e:
818 except OSError, e:
820 if e.errno != errno.ENOENT:
819 if e.errno != errno.ENOENT:
821 raise
820 raise
822
821
823 class filecache(object):
822 class filecache(object):
824 '''A property like decorator that tracks a file under .hg/ for updates.
823 '''A property like decorator that tracks a file under .hg/ for updates.
825
824
826 Records stat info when called in _filecache.
825 Records stat info when called in _filecache.
827
826
828 On subsequent calls, compares old stat info with new info, and recreates
827 On subsequent calls, compares old stat info with new info, and recreates
829 the object when needed, updating the new stat info in _filecache.
828 the object when needed, updating the new stat info in _filecache.
830
829
831 Mercurial either atomic renames or appends for files under .hg,
830 Mercurial either atomic renames or appends for files under .hg,
832 so to ensure the cache is reliable we need the filesystem to be able
831 so to ensure the cache is reliable we need the filesystem to be able
833 to tell us if a file has been replaced. If it can't, we fallback to
832 to tell us if a file has been replaced. If it can't, we fallback to
834 recreating the object on every call (essentially the same behaviour as
833 recreating the object on every call (essentially the same behaviour as
835 propertycache).'''
834 propertycache).'''
836 def __init__(self, path):
835 def __init__(self, path):
837 self.path = path
836 self.path = path
838
837
839 def join(self, obj, fname):
838 def join(self, obj, fname):
840 """Used to compute the runtime path of the cached file.
839 """Used to compute the runtime path of the cached file.
841
840
842 Users should subclass filecache and provide their own version of this
841 Users should subclass filecache and provide their own version of this
843 function to call the appropriate join function on 'obj' (an instance
842 function to call the appropriate join function on 'obj' (an instance
844 of the class that its member function was decorated).
843 of the class that its member function was decorated).
845 """
844 """
846 return obj.join(fname)
845 return obj.join(fname)
847
846
848 def __call__(self, func):
847 def __call__(self, func):
849 self.func = func
848 self.func = func
850 self.name = func.__name__
849 self.name = func.__name__
851 return self
850 return self
852
851
853 def __get__(self, obj, type=None):
852 def __get__(self, obj, type=None):
854 # do we need to check if the file changed?
853 # do we need to check if the file changed?
855 if self.name in obj.__dict__:
854 if self.name in obj.__dict__:
856 assert self.name in obj._filecache, self.name
855 assert self.name in obj._filecache, self.name
857 return obj.__dict__[self.name]
856 return obj.__dict__[self.name]
858
857
859 entry = obj._filecache.get(self.name)
858 entry = obj._filecache.get(self.name)
860
859
861 if entry:
860 if entry:
862 if entry.changed():
861 if entry.changed():
863 entry.obj = self.func(obj)
862 entry.obj = self.func(obj)
864 else:
863 else:
865 path = self.join(obj, self.path)
864 path = self.join(obj, self.path)
866
865
867 # We stat -before- creating the object so our cache doesn't lie if
866 # We stat -before- creating the object so our cache doesn't lie if
868 # a writer modified between the time we read and stat
867 # a writer modified between the time we read and stat
869 entry = filecacheentry(path)
868 entry = filecacheentry(path)
870 entry.obj = self.func(obj)
869 entry.obj = self.func(obj)
871
870
872 obj._filecache[self.name] = entry
871 obj._filecache[self.name] = entry
873
872
874 obj.__dict__[self.name] = entry.obj
873 obj.__dict__[self.name] = entry.obj
875 return entry.obj
874 return entry.obj
876
875
877 def __set__(self, obj, value):
876 def __set__(self, obj, value):
878 if self.name not in obj._filecache:
877 if self.name not in obj._filecache:
879 # we add an entry for the missing value because X in __dict__
878 # we add an entry for the missing value because X in __dict__
880 # implies X in _filecache
879 # implies X in _filecache
881 ce = filecacheentry(self.join(obj, self.path), False)
880 ce = filecacheentry(self.join(obj, self.path), False)
882 obj._filecache[self.name] = ce
881 obj._filecache[self.name] = ce
883 else:
882 else:
884 ce = obj._filecache[self.name]
883 ce = obj._filecache[self.name]
885
884
886 ce.obj = value # update cached copy
885 ce.obj = value # update cached copy
887 obj.__dict__[self.name] = value # update copy returned by obj.x
886 obj.__dict__[self.name] = value # update copy returned by obj.x
888
887
889 def __delete__(self, obj):
888 def __delete__(self, obj):
890 try:
889 try:
891 del obj.__dict__[self.name]
890 del obj.__dict__[self.name]
892 except KeyError:
891 except KeyError:
893 raise AttributeError(self.name)
892 raise AttributeError(self.name)
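As a usage note, the loop this commit rewrites classifies each file from the walk results by its dirstate character. Below is a simplified, self-contained Python 2 sketch of that classification with made-up sample data; the audit_path check and the rename detection from the real function are omitted, and the dictionaries are illustrative stand-ins only.

# Toy stand-ins, for illustration only: dirstate characters and walk results
# shaped like those used in the addremove hunk above.
dirstate_chars = {'new.txt': '?', 'missing.txt': 'n',
                  'dropped.txt': 'r', 'pending.txt': 'a'}
walkresults = {'new.txt': 'stat-info', 'missing.txt': None,
               'dropped.txt': None, 'pending.txt': 'stat-info'}

added, unknown, deleted, removed = [], [], [], []
for abs, st in walkresults.iteritems():
    dstate = dirstate_chars[abs]
    if dstate == '?':                 # untracked and present on disk: candidate for adding
        unknown.append(abs)
    elif dstate != 'r' and not st:    # tracked but gone from disk: deleted
        deleted.append(abs)
    elif dstate == 'r':               # already marked for removal
        removed.append(abs)
    elif dstate == 'a':               # already marked for addition
        added.append(abs)

print unknown, deleted, removed, added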