##// END OF EJS Templates
vfs: add "isfile()"
FUJIWARA Katsunori -
r20085:589d6bb5 default
parent child Browse files
Show More
@@ -1,907 +1,910 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases, parsers
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob
13 import os, errno, re, glob
14
14
# Select the platform-specific scm helper module once, at import time.
if os.name == 'nt':
    import scmwindows as scmplatform
else:
    import scmposix as scmplatform

# Re-export the hgrc path helpers at module level for callers.
systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath
22
22
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for node in (excluded or []):
        if node not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(node)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
43
43
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not usable as a new label (branch, tag, ...) name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise util.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        # not an integer: fine as a label name
        pass
    else:
        raise util.Abort(_("cannot use an integer as a name"))
57
57
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in '\r\n'):
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
62
62
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
74
74
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    parsed = util.parsebool(val)
    # Windows always aborts on non-portable names
    abort = os.name == 'nt' or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    if parsed is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
87
87
class casecollisionauditor(object):
    '''Warn or abort when a new filename case-folds onto a tracked one.'''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case all tracked filenames in a single pass
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
111
111
112 class abstractvfs(object):
112 class abstractvfs(object):
113 """Abstract base class; cannot be instantiated"""
113 """Abstract base class; cannot be instantiated"""
114
114
115 def __init__(self, *args, **kwargs):
115 def __init__(self, *args, **kwargs):
116 '''Prevent instantiation; don't call this from subclasses.'''
116 '''Prevent instantiation; don't call this from subclasses.'''
117 raise NotImplementedError('attempted instantiating ' + str(type(self)))
117 raise NotImplementedError('attempted instantiating ' + str(type(self)))
118
118
119 def tryread(self, path):
119 def tryread(self, path):
120 '''gracefully return an empty string for missing files'''
120 '''gracefully return an empty string for missing files'''
121 try:
121 try:
122 return self.read(path)
122 return self.read(path)
123 except IOError, inst:
123 except IOError, inst:
124 if inst.errno != errno.ENOENT:
124 if inst.errno != errno.ENOENT:
125 raise
125 raise
126 return ""
126 return ""
127
127
128 def open(self, path, mode="r", text=False, atomictemp=False):
128 def open(self, path, mode="r", text=False, atomictemp=False):
129 self.open = self.__call__
129 self.open = self.__call__
130 return self.__call__(path, mode, text, atomictemp)
130 return self.__call__(path, mode, text, atomictemp)
131
131
132 def read(self, path):
132 def read(self, path):
133 fp = self(path, 'rb')
133 fp = self(path, 'rb')
134 try:
134 try:
135 return fp.read()
135 return fp.read()
136 finally:
136 finally:
137 fp.close()
137 fp.close()
138
138
139 def write(self, path, data):
139 def write(self, path, data):
140 fp = self(path, 'wb')
140 fp = self(path, 'wb')
141 try:
141 try:
142 return fp.write(data)
142 return fp.write(data)
143 finally:
143 finally:
144 fp.close()
144 fp.close()
145
145
146 def append(self, path, data):
146 def append(self, path, data):
147 fp = self(path, 'ab')
147 fp = self(path, 'ab')
148 try:
148 try:
149 return fp.write(data)
149 return fp.write(data)
150 finally:
150 finally:
151 fp.close()
151 fp.close()
152
152
153 def exists(self, path=None):
153 def exists(self, path=None):
154 return os.path.exists(self.join(path))
154 return os.path.exists(self.join(path))
155
155
156 def fstat(self, fp):
156 def fstat(self, fp):
157 return util.fstat(fp)
157 return util.fstat(fp)
158
158
159 def isdir(self, path=None):
159 def isdir(self, path=None):
160 return os.path.isdir(self.join(path))
160 return os.path.isdir(self.join(path))
161
161
162 def isfile(self, path=None):
163 return os.path.isfile(self.join(path))
164
162 def islink(self, path=None):
165 def islink(self, path=None):
163 return os.path.islink(self.join(path))
166 return os.path.islink(self.join(path))
164
167
165 def lstat(self, path=None):
168 def lstat(self, path=None):
166 return os.lstat(self.join(path))
169 return os.lstat(self.join(path))
167
170
168 def makedir(self, path=None, notindexed=True):
171 def makedir(self, path=None, notindexed=True):
169 return util.makedir(self.join(path), notindexed)
172 return util.makedir(self.join(path), notindexed)
170
173
171 def makedirs(self, path=None, mode=None):
174 def makedirs(self, path=None, mode=None):
172 return util.makedirs(self.join(path), mode)
175 return util.makedirs(self.join(path), mode)
173
176
174 def mkdir(self, path=None):
177 def mkdir(self, path=None):
175 return os.mkdir(self.join(path))
178 return os.mkdir(self.join(path))
176
179
177 def readdir(self, path=None, stat=None, skip=None):
180 def readdir(self, path=None, stat=None, skip=None):
178 return osutil.listdir(self.join(path), stat, skip)
181 return osutil.listdir(self.join(path), stat, skip)
179
182
180 def rename(self, src, dst):
183 def rename(self, src, dst):
181 return util.rename(self.join(src), self.join(dst))
184 return util.rename(self.join(src), self.join(dst))
182
185
183 def readlink(self, path):
186 def readlink(self, path):
184 return os.readlink(self.join(path))
187 return os.readlink(self.join(path))
185
188
186 def setflags(self, path, l, x):
189 def setflags(self, path, l, x):
187 return util.setflags(self.join(path), l, x)
190 return util.setflags(self.join(path), l, x)
188
191
189 def stat(self, path=None):
192 def stat(self, path=None):
190 return os.stat(self.join(path))
193 return os.stat(self.join(path))
191
194
192 def unlink(self, path=None):
195 def unlink(self, path=None):
193 return util.unlink(self.join(path))
196 return util.unlink(self.join(path))
194
197
195 def utime(self, path=None, t=None):
198 def utime(self, path=None, t=None):
196 return os.utime(self.join(path), t)
199 return os.utime(self.join(path), t)
197
200
198 class vfs(abstractvfs):
201 class vfs(abstractvfs):
199 '''Operate files relative to a base directory
202 '''Operate files relative to a base directory
200
203
201 This class is used to hide the details of COW semantics and
204 This class is used to hide the details of COW semantics and
202 remote file access from higher level code.
205 remote file access from higher level code.
203 '''
206 '''
204 def __init__(self, base, audit=True, expandpath=False, realpath=False):
207 def __init__(self, base, audit=True, expandpath=False, realpath=False):
205 if expandpath:
208 if expandpath:
206 base = util.expandpath(base)
209 base = util.expandpath(base)
207 if realpath:
210 if realpath:
208 base = os.path.realpath(base)
211 base = os.path.realpath(base)
209 self.base = base
212 self.base = base
210 self._setmustaudit(audit)
213 self._setmustaudit(audit)
211 self.createmode = None
214 self.createmode = None
212 self._trustnlink = None
215 self._trustnlink = None
213
216
214 def _getmustaudit(self):
217 def _getmustaudit(self):
215 return self._audit
218 return self._audit
216
219
217 def _setmustaudit(self, onoff):
220 def _setmustaudit(self, onoff):
218 self._audit = onoff
221 self._audit = onoff
219 if onoff:
222 if onoff:
220 self.audit = pathutil.pathauditor(self.base)
223 self.audit = pathutil.pathauditor(self.base)
221 else:
224 else:
222 self.audit = util.always
225 self.audit = util.always
223
226
224 mustaudit = property(_getmustaudit, _setmustaudit)
227 mustaudit = property(_getmustaudit, _setmustaudit)
225
228
226 @util.propertycache
229 @util.propertycache
227 def _cansymlink(self):
230 def _cansymlink(self):
228 return util.checklink(self.base)
231 return util.checklink(self.base)
229
232
230 @util.propertycache
233 @util.propertycache
231 def _chmod(self):
234 def _chmod(self):
232 return util.checkexec(self.base)
235 return util.checkexec(self.base)
233
236
234 def _fixfilemode(self, name):
237 def _fixfilemode(self, name):
235 if self.createmode is None or not self._chmod:
238 if self.createmode is None or not self._chmod:
236 return
239 return
237 os.chmod(name, self.createmode & 0666)
240 os.chmod(name, self.createmode & 0666)
238
241
239 def __call__(self, path, mode="r", text=False, atomictemp=False):
242 def __call__(self, path, mode="r", text=False, atomictemp=False):
240 if self._audit:
243 if self._audit:
241 r = util.checkosfilename(path)
244 r = util.checkosfilename(path)
242 if r:
245 if r:
243 raise util.Abort("%s: %r" % (r, path))
246 raise util.Abort("%s: %r" % (r, path))
244 self.audit(path)
247 self.audit(path)
245 f = self.join(path)
248 f = self.join(path)
246
249
247 if not text and "b" not in mode:
250 if not text and "b" not in mode:
248 mode += "b" # for that other OS
251 mode += "b" # for that other OS
249
252
250 nlink = -1
253 nlink = -1
251 if mode not in ('r', 'rb'):
254 if mode not in ('r', 'rb'):
252 dirname, basename = util.split(f)
255 dirname, basename = util.split(f)
253 # If basename is empty, then the path is malformed because it points
256 # If basename is empty, then the path is malformed because it points
254 # to a directory. Let the posixfile() call below raise IOError.
257 # to a directory. Let the posixfile() call below raise IOError.
255 if basename:
258 if basename:
256 if atomictemp:
259 if atomictemp:
257 util.ensuredirs(dirname, self.createmode)
260 util.ensuredirs(dirname, self.createmode)
258 return util.atomictempfile(f, mode, self.createmode)
261 return util.atomictempfile(f, mode, self.createmode)
259 try:
262 try:
260 if 'w' in mode:
263 if 'w' in mode:
261 util.unlink(f)
264 util.unlink(f)
262 nlink = 0
265 nlink = 0
263 else:
266 else:
264 # nlinks() may behave differently for files on Windows
267 # nlinks() may behave differently for files on Windows
265 # shares if the file is open.
268 # shares if the file is open.
266 fd = util.posixfile(f)
269 fd = util.posixfile(f)
267 nlink = util.nlinks(f)
270 nlink = util.nlinks(f)
268 if nlink < 1:
271 if nlink < 1:
269 nlink = 2 # force mktempcopy (issue1922)
272 nlink = 2 # force mktempcopy (issue1922)
270 fd.close()
273 fd.close()
271 except (OSError, IOError), e:
274 except (OSError, IOError), e:
272 if e.errno != errno.ENOENT:
275 if e.errno != errno.ENOENT:
273 raise
276 raise
274 nlink = 0
277 nlink = 0
275 util.ensuredirs(dirname, self.createmode)
278 util.ensuredirs(dirname, self.createmode)
276 if nlink > 0:
279 if nlink > 0:
277 if self._trustnlink is None:
280 if self._trustnlink is None:
278 self._trustnlink = nlink > 1 or util.checknlink(f)
281 self._trustnlink = nlink > 1 or util.checknlink(f)
279 if nlink > 1 or not self._trustnlink:
282 if nlink > 1 or not self._trustnlink:
280 util.rename(util.mktempcopy(f), f)
283 util.rename(util.mktempcopy(f), f)
281 fp = util.posixfile(f, mode)
284 fp = util.posixfile(f, mode)
282 if nlink == 0:
285 if nlink == 0:
283 self._fixfilemode(f)
286 self._fixfilemode(f)
284 return fp
287 return fp
285
288
286 def symlink(self, src, dst):
289 def symlink(self, src, dst):
287 self.audit(dst)
290 self.audit(dst)
288 linkname = self.join(dst)
291 linkname = self.join(dst)
289 try:
292 try:
290 os.unlink(linkname)
293 os.unlink(linkname)
291 except OSError:
294 except OSError:
292 pass
295 pass
293
296
294 util.ensuredirs(os.path.dirname(linkname), self.createmode)
297 util.ensuredirs(os.path.dirname(linkname), self.createmode)
295
298
296 if self._cansymlink:
299 if self._cansymlink:
297 try:
300 try:
298 os.symlink(src, linkname)
301 os.symlink(src, linkname)
299 except OSError, err:
302 except OSError, err:
300 raise OSError(err.errno, _('could not symlink to %r: %s') %
303 raise OSError(err.errno, _('could not symlink to %r: %s') %
301 (src, err.strerror), linkname)
304 (src, err.strerror), linkname)
302 else:
305 else:
303 self.write(dst, src)
306 self.write(dst, src)
304
307
305 def join(self, path):
308 def join(self, path):
306 if path:
309 if path:
307 return os.path.join(self.base, path)
310 return os.path.join(self.base, path)
308 else:
311 else:
309 return self.base
312 return self.base
310
313
# legacy alias kept for backwards compatibility
opener = vfs
312
315
class auditvfs(object):
    '''Mixin that delegates the mustaudit flag to a wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
324
327
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the filename before delegating to the wrapped vfs
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if path:
            return self.vfs.join(self._filter(path))
        return self.vfs.join(path)
340
343
# legacy alias kept for backwards compatibility
filteropener = filtervfs
342
345
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # refuse any open mode that could modify the underlying file
        if mode not in ('r', 'rb'):
            # mark the message for translation, like every other
            # util.Abort message in this module
            raise util.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)
353
356
354
357
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            '''record dirname's stat; return False if already seen'''
            dirstat = os.stat(dirname)
            for seenstat in dirlst:
                if samestat(dirstat, seenstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            keep = []
            for subdir in dirs:
                full = os.path.join(root, subdir)
                if adddir(seen_dirs, full):
                    if os.path.islink(full):
                        # walk the link target separately, sharing seen_dirs
                        for inner in walkrepos(full, True, seen_dirs):
                            yield inner
                    else:
                        keep.append(subdir)
            dirs[:] = keep
402
405
def osrcpath():
    '''return default os-specific hgrc search path'''
    rcs = systemrcpath()
    rcs.extend(userrcpath())
    return [os.path.normpath(p) for p in rcs]
409
412
# cache for rcpath(); None until the search path is first computed
_rcpath = None
411
414
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # already computed: serve the cached result
        return _rcpath
    if 'HGRCPATH' in os.environ:
        _rcpath = []
        for entry in os.environ['HGRCPATH'].split(os.pathsep):
            if not entry:
                continue
            entry = util.expandpath(entry)
            if os.path.isdir(entry):
                # directories contribute all their *.rc files
                for fname, kind in osutil.listdir(entry):
                    if fname.endswith('.rc'):
                        _rcpath.append(os.path.join(entry, fname))
            else:
                _rcpath.append(entry)
    else:
        _rcpath = osrcpath()
    return _rcpath
435
438
def revsingle(repo, revspec, default='.'):
    '''return the single (last) changectx matching revspec, or repo[default]'''
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched[-1]]
444
447
def revpair(repo, revs):
    '''resolve revs to a (node, node-or-None) pair'''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if len(resolved) == 0:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    # a single spec with no range separator yields a single revision
    if len(resolved) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(resolved[0]), None

    return repo.lookup(resolved[0]), repo.lookup(resolved[-1])
460
463
# separator used by old-style revision ranges, e.g. "1:5"
_revrangesep = ':'
462
465
def revrange(repo, revs):
    """Return a list of revision numbers for a list of revision specs."""

    def revfix(repo, val, defval):
        # empty (but not integer-zero) values fall back to defval
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, revlist = set(), []
    for spec in revs:
        if revlist and not seen:
            # sync the dedup set with revisions collected so far
            seen = set(revlist)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                revlist.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                it = repo.changelog.revs(start, end)
                if not seen and not revlist:
                    # by far the most common case: revs = ["-1:0"]
                    revlist = list(it)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(it)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                revlist.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                revlist.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        matcher = revset.match(repo.ui, spec)
        found = [r for r in matcher(repo, list(repo)) if r not in seen]
        revlist.extend(found)
        seen.update(found)

    return revlist
520
523
def expandpats(pats):
    '''expand plain (kind-less) patterns with shell globbing'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is None:
            try:
                matches = glob.glob(name)
            except re.error:
                # invalid glob syntax: treat the name literally
                matches = [name]
            if matches:
                expanded.extend(matches)
                continue
        expanded.append(pat)
    return expanded
537
540
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''build a matcher for ctx, returning it with the expanded patterns'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    matcher = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                        default)
    def badfn(f, msg):
        # report unmatchable files relative to the cwd
        ctx._repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))
    matcher.bad = badfn
    return matcher, pats
550
553
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''convenience wrapper: matchandpats() without the expanded patterns'''
    return matchandpats(ctx, pats, opts, globbed, default)[0]
553
556
def matchall(repo):
    '''return a matcher that accepts every file in the repository'''
    return matchmod.always(repo.root, repo.getcwd())
556
559
def matchfiles(repo, files):
    '''return a matcher that accepts exactly the given files'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
559
562
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''schedule unknown files for addition and missing ones for removal,
    detecting renames by similarity.

    Returns 1 when a file explicitly named by the patterns was rejected
    by the matcher, 0 otherwise.'''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # exact matches are only reported in verbose mode
        if not repo.ui.verbose and m.exact(abs):
            continue
        rel = m.rel(abs)
        if abs in unknownset:
            status = _('adding %s\n') % ((pats and rel) or abs)
        else:
            status = _('removing %s\n') % ((pats and rel) or abs)
        repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
594
597
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    matcher = matchfiles(repo, files)
    bad = []
    matcher.bad = lambda x, y: bad.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, matcher)

    if repo.ui.verbose:
        # echo what will be added/removed, in sorted order
        notfound = set(unknown)
        for abs in sorted(notfound | set(deleted)):
            if abs in notfound:
                msg = _('adding %s\n') % abs
            else:
                msg = _('removing %s\n') % abs
            repo.ui.status(msg)

    renames = _findrenames(repo, matcher, added + unknown,
                           removed + deleted, similarity)

    _markchanges(repo, unknown, deleted, renames)

    # report failure if any explicitly requested file was rejected
    for f in bad:
        if f in matcher.files():
            return 1
    return 0
624
627
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added = []
    unknown = []
    deleted = []
    removed = []
    audit = pathutil.pathauditor(repo.root)

    wctx = repo[None]
    ds = repo.dirstate
    results = ds.walk(matcher, sorted(wctx.substate), True, False,
                      full=False)
    for abs, st in results.iteritems():
        state = ds[abs]
        if state == '?' and audit.check(abs):
            unknown.append(abs)
        elif state != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        # for finding renames
        elif state == 'r':
            removed.append(abs)
        elif state == 'a':
            added.append(abs)

    return added, unknown, deleted, removed
651
654
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # similarity detection disabled: nothing to record
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
666
669
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations below happen under the working-directory lock
    wlock = repo.wlock()
    try:
        # order matters: forget missing files and add new ones before
        # recording copies, which refer to the freshly added entries
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
679
682
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if not dryrun and ds[dst] not in 'mn':
            ds.normallookup(dst)
        return
    # genuinely new copy target
    if ds[origsrc] == 'a' and origsrc == src and not ui.quiet:
        ui.warn(_("%s has not been committed yet, so no copy "
                  "data will be stored for %s.\n")
                % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
    if not dryrun:
        if ds[dst] in '?r':
            wctx.add([dst])
        else:
            wctx.copy(origsrc, dst)
698
701
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        if not r or not r[0].isalnum():
            # empty or non-alphanumeric first character: not a valid feature
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missings))
    return requirements
715
718
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    Snapshots util.cachestat for ``path`` and can later report whether
    the file appears to have changed since the snapshot.  ``_cacheable``
    is tri-state: True/False once determined, None while unknown (e.g.
    the file did not exist when first statted).
    """
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-snapshot only when stat data can be trusted for caching
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed (or cannot be cached reliably)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns util.cachestat for path, or None (implicitly) when the
        # file does not exist; other OS errors propagate
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
770
773
class filecacheentry(object):
    '''Change tracker aggregating one filecachesubentry per watched path.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
787
790
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative file names, resolved per-instance via self.join()
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            # fast path: cached value already materialized on the instance
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # file(s) changed on disk: rebuild the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # invalidate: next __get__ re-runs the wrapped function
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
863
866
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if util.safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: honor the skip state
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # ancestors are already counted: bump and stop
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # ancestors still referenced elsewhere: decrement and stop
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
899
902
# prefer the C implementation from the parsers module when it provides
# one; it replaces the pure Python dirs class defined above
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
902
905
def finddirs(path):
    '''Yield each ancestor directory of a slash-separated path,
    deepest first (e.g. "a/b/c" -> "a/b", "a").'''
    cut = path.rfind('/')
    while cut >= 0:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
General Comments 0
You need to be logged in to leave comments. Login now