##// END OF EJS Templates
scmutil: migrate finddirs from dirstate
Bryan O'Sullivan -
r18897:38982de2 default
parent child Browse files
Show More
@@ -1,838 +1,832 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import errno
7 import errno
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding
12 import os, stat, errno, gc
12 import os, stat, errno, gc
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname against the repository's metadata opener (.hg/)
        return obj._opener.join(fname)
22
22
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname against the working directory root
        return obj._join(fname)
27
27
28 def _finddirs(path):
29 pos = path.rfind('/')
30 while pos != -1:
31 yield path[:pos]
32 pos = path.rfind('/', 0, pos)
33
34 def _incdirs(dirs, path):
28 def _incdirs(dirs, path):
35 for base in _finddirs(path):
29 for base in scmutil.finddirs(path):
36 if base in dirs:
30 if base in dirs:
37 dirs[base] += 1
31 dirs[base] += 1
38 return
32 return
39 dirs[base] = 1
33 dirs[base] = 1
40
34
41 def _decdirs(dirs, path):
35 def _decdirs(dirs, path):
42 for base in _finddirs(path):
36 for base in scmutil.finddirs(path):
43 if dirs[base] > 1:
37 if dirs[base] > 1:
44 dirs[base] -= 1
38 dirs[base] -= 1
45 return
39 return
46 del dirs[base]
40 del dirs[base]
47
41
class dirstate(object):

    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        # separator-terminated root, so _join can use plain concatenation
        self._rootdir = os.path.join(root, '')
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
66
60
67 @propertycache
61 @propertycache
68 def _map(self):
62 def _map(self):
69 '''Return the dirstate contents as a map from filename to
63 '''Return the dirstate contents as a map from filename to
70 (state, mode, size, time).'''
64 (state, mode, size, time).'''
71 self._read()
65 self._read()
72 return self._map
66 return self._map
73
67
74 @propertycache
68 @propertycache
75 def _copymap(self):
69 def _copymap(self):
76 self._read()
70 self._read()
77 return self._copymap
71 return self._copymap
78
72
79 @propertycache
73 @propertycache
80 def _foldmap(self):
74 def _foldmap(self):
81 f = {}
75 f = {}
82 for name in self._map:
76 for name in self._map:
83 f[util.normcase(name)] = name
77 f[util.normcase(name)] = name
84 for name in self._dirs:
78 for name in self._dirs:
85 f[util.normcase(name)] = name
79 f[util.normcase(name)] = name
86 f['.'] = '.' # prevents useless util.fspath() invocation
80 f['.'] = '.' # prevents useless util.fspath() invocation
87 return f
81 return f
88
82
89 @repocache('branch')
83 @repocache('branch')
90 def _branch(self):
84 def _branch(self):
91 try:
85 try:
92 return self._opener.read("branch").strip() or "default"
86 return self._opener.read("branch").strip() or "default"
93 except IOError, inst:
87 except IOError, inst:
94 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
95 raise
89 raise
96 return "default"
90 return "default"
97
91
98 @propertycache
92 @propertycache
99 def _pl(self):
93 def _pl(self):
100 try:
94 try:
101 fp = self._opener("dirstate")
95 fp = self._opener("dirstate")
102 st = fp.read(40)
96 st = fp.read(40)
103 fp.close()
97 fp.close()
104 l = len(st)
98 l = len(st)
105 if l == 40:
99 if l == 40:
106 return st[:20], st[20:40]
100 return st[:20], st[20:40]
107 elif l > 0 and l < 40:
101 elif l > 0 and l < 40:
108 raise util.Abort(_('working directory state appears damaged!'))
102 raise util.Abort(_('working directory state appears damaged!'))
109 except IOError, err:
103 except IOError, err:
110 if err.errno != errno.ENOENT:
104 if err.errno != errno.ENOENT:
111 raise
105 raise
112 return [nullid, nullid]
106 return [nullid, nullid]
113
107
114 @propertycache
108 @propertycache
115 def _dirs(self):
109 def _dirs(self):
116 dirs = {}
110 dirs = {}
117 for f, s in self._map.iteritems():
111 for f, s in self._map.iteritems():
118 if s[0] != 'r':
112 if s[0] != 'r':
119 _incdirs(dirs, f)
113 _incdirs(dirs, f)
120 return dirs
114 return dirs
121
115
122 def dirs(self):
116 def dirs(self):
123 return self._dirs
117 return self._dirs
124
118
125 @rootcache('.hgignore')
119 @rootcache('.hgignore')
126 def _ignore(self):
120 def _ignore(self):
127 files = [self._join('.hgignore')]
121 files = [self._join('.hgignore')]
128 for name, path in self._ui.configitems("ui"):
122 for name, path in self._ui.configitems("ui"):
129 if name == 'ignore' or name.startswith('ignore.'):
123 if name == 'ignore' or name.startswith('ignore.'):
130 files.append(util.expandpath(path))
124 files.append(util.expandpath(path))
131 return ignore.ignore(self._root, files, self._ui.warn)
125 return ignore.ignore(self._root, files, self._ui.warn)
132
126
133 @propertycache
127 @propertycache
134 def _slash(self):
128 def _slash(self):
135 return self._ui.configbool('ui', 'slash') and os.sep != '/'
129 return self._ui.configbool('ui', 'slash') and os.sep != '/'
136
130
137 @propertycache
131 @propertycache
138 def _checklink(self):
132 def _checklink(self):
139 return util.checklink(self._root)
133 return util.checklink(self._root)
140
134
141 @propertycache
135 @propertycache
142 def _checkexec(self):
136 def _checkexec(self):
143 return util.checkexec(self._root)
137 return util.checkexec(self._root)
144
138
145 @propertycache
139 @propertycache
146 def _checkcase(self):
140 def _checkcase(self):
147 return not util.checkcase(self._join('.hg'))
141 return not util.checkcase(self._join('.hg'))
148
142
149 def _join(self, f):
143 def _join(self, f):
150 # much faster than os.path.join()
144 # much faster than os.path.join()
151 # it's safe because f is always a relative path
145 # it's safe because f is always a relative path
152 return self._rootdir + f
146 return self._rootdir + f
153
147
154 def flagfunc(self, buildfallback):
148 def flagfunc(self, buildfallback):
155 if self._checklink and self._checkexec:
149 if self._checklink and self._checkexec:
156 def f(x):
150 def f(x):
157 try:
151 try:
158 st = os.lstat(self._join(x))
152 st = os.lstat(self._join(x))
159 if util.statislink(st):
153 if util.statislink(st):
160 return 'l'
154 return 'l'
161 if util.statisexec(st):
155 if util.statisexec(st):
162 return 'x'
156 return 'x'
163 except OSError:
157 except OSError:
164 pass
158 pass
165 return ''
159 return ''
166 return f
160 return f
167
161
168 fallback = buildfallback()
162 fallback = buildfallback()
169 if self._checklink:
163 if self._checklink:
170 def f(x):
164 def f(x):
171 if os.path.islink(self._join(x)):
165 if os.path.islink(self._join(x)):
172 return 'l'
166 return 'l'
173 if 'x' in fallback(x):
167 if 'x' in fallback(x):
174 return 'x'
168 return 'x'
175 return ''
169 return ''
176 return f
170 return f
177 if self._checkexec:
171 if self._checkexec:
178 def f(x):
172 def f(x):
179 if 'l' in fallback(x):
173 if 'l' in fallback(x):
180 return 'l'
174 return 'l'
181 if util.isexec(self._join(x)):
175 if util.isexec(self._join(x)):
182 return 'x'
176 return 'x'
183 return ''
177 return ''
184 return f
178 return f
185 else:
179 else:
186 return fallback
180 return fallback
187
181
188 def getcwd(self):
182 def getcwd(self):
189 cwd = os.getcwd()
183 cwd = os.getcwd()
190 if cwd == self._root:
184 if cwd == self._root:
191 return ''
185 return ''
192 # self._root ends with a path separator if self._root is '/' or 'C:\'
186 # self._root ends with a path separator if self._root is '/' or 'C:\'
193 rootsep = self._root
187 rootsep = self._root
194 if not util.endswithsep(rootsep):
188 if not util.endswithsep(rootsep):
195 rootsep += os.sep
189 rootsep += os.sep
196 if cwd.startswith(rootsep):
190 if cwd.startswith(rootsep):
197 return cwd[len(rootsep):]
191 return cwd[len(rootsep):]
198 else:
192 else:
199 # we're outside the repo. return an absolute path.
193 # we're outside the repo. return an absolute path.
200 return cwd
194 return cwd
201
195
202 def pathto(self, f, cwd=None):
196 def pathto(self, f, cwd=None):
203 if cwd is None:
197 if cwd is None:
204 cwd = self.getcwd()
198 cwd = self.getcwd()
205 path = util.pathto(self._root, cwd, f)
199 path = util.pathto(self._root, cwd, f)
206 if self._slash:
200 if self._slash:
207 return util.normpath(path)
201 return util.normpath(path)
208 return path
202 return path
209
203
210 def __getitem__(self, key):
204 def __getitem__(self, key):
211 '''Return the current state of key (a filename) in the dirstate.
205 '''Return the current state of key (a filename) in the dirstate.
212
206
213 States are:
207 States are:
214 n normal
208 n normal
215 m needs merging
209 m needs merging
216 r marked for removal
210 r marked for removal
217 a marked for addition
211 a marked for addition
218 ? not tracked
212 ? not tracked
219 '''
213 '''
220 return self._map.get(key, ("?",))[0]
214 return self._map.get(key, ("?",))[0]
221
215
222 def __contains__(self, key):
216 def __contains__(self, key):
223 return key in self._map
217 return key in self._map
224
218
225 def __iter__(self):
219 def __iter__(self):
226 for x in sorted(self._map):
220 for x in sorted(self._map):
227 yield x
221 yield x
228
222
229 def iteritems(self):
223 def iteritems(self):
230 return self._map.iteritems()
224 return self._map.iteritems()
231
225
232 def parents(self):
226 def parents(self):
233 return [self._validate(p) for p in self._pl]
227 return [self._validate(p) for p in self._pl]
234
228
235 def p1(self):
229 def p1(self):
236 return self._validate(self._pl[0])
230 return self._validate(self._pl[0])
237
231
238 def p2(self):
232 def p2(self):
239 return self._validate(self._pl[1])
233 return self._validate(self._pl[1])
240
234
241 def branch(self):
235 def branch(self):
242 return encoding.tolocal(self._branch)
236 return encoding.tolocal(self._branch)
243
237
244 def setparents(self, p1, p2=nullid):
238 def setparents(self, p1, p2=nullid):
245 """Set dirstate parents to p1 and p2.
239 """Set dirstate parents to p1 and p2.
246
240
247 When moving from two parents to one, 'm' merged entries a
241 When moving from two parents to one, 'm' merged entries a
248 adjusted to normal and previous copy records discarded and
242 adjusted to normal and previous copy records discarded and
249 returned by the call.
243 returned by the call.
250
244
251 See localrepo.setparents()
245 See localrepo.setparents()
252 """
246 """
253 self._dirty = self._dirtypl = True
247 self._dirty = self._dirtypl = True
254 oldp2 = self._pl[1]
248 oldp2 = self._pl[1]
255 self._pl = p1, p2
249 self._pl = p1, p2
256 copies = {}
250 copies = {}
257 if oldp2 != nullid and p2 == nullid:
251 if oldp2 != nullid and p2 == nullid:
258 # Discard 'm' markers when moving away from a merge state
252 # Discard 'm' markers when moving away from a merge state
259 for f, s in self._map.iteritems():
253 for f, s in self._map.iteritems():
260 if s[0] == 'm':
254 if s[0] == 'm':
261 if f in self._copymap:
255 if f in self._copymap:
262 copies[f] = self._copymap[f]
256 copies[f] = self._copymap[f]
263 self.normallookup(f)
257 self.normallookup(f)
264 return copies
258 return copies
265
259
266 def setbranch(self, branch):
260 def setbranch(self, branch):
267 self._branch = encoding.fromlocal(branch)
261 self._branch = encoding.fromlocal(branch)
268 f = self._opener('branch', 'w', atomictemp=True)
262 f = self._opener('branch', 'w', atomictemp=True)
269 try:
263 try:
270 f.write(self._branch + '\n')
264 f.write(self._branch + '\n')
271 f.close()
265 f.close()
272
266
273 # make sure filecache has the correct stat info for _branch after
267 # make sure filecache has the correct stat info for _branch after
274 # replacing the underlying file
268 # replacing the underlying file
275 ce = self._filecache['_branch']
269 ce = self._filecache['_branch']
276 if ce:
270 if ce:
277 ce.refresh()
271 ce.refresh()
278 except: # re-raises
272 except: # re-raises
279 f.discard()
273 f.discard()
280 raise
274 raise
281
275
282 def _read(self):
276 def _read(self):
283 self._map = {}
277 self._map = {}
284 self._copymap = {}
278 self._copymap = {}
285 try:
279 try:
286 st = self._opener.read("dirstate")
280 st = self._opener.read("dirstate")
287 except IOError, err:
281 except IOError, err:
288 if err.errno != errno.ENOENT:
282 if err.errno != errno.ENOENT:
289 raise
283 raise
290 return
284 return
291 if not st:
285 if not st:
292 return
286 return
293
287
294 # Python's garbage collector triggers a GC each time a certain number
288 # Python's garbage collector triggers a GC each time a certain number
295 # of container objects (the number being defined by
289 # of container objects (the number being defined by
296 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
290 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
297 # for each file in the dirstate. The C version then immediately marks
291 # for each file in the dirstate. The C version then immediately marks
298 # them as not to be tracked by the collector. However, this has no
292 # them as not to be tracked by the collector. However, this has no
299 # effect on when GCs are triggered, only on what objects the GC looks
293 # effect on when GCs are triggered, only on what objects the GC looks
300 # into. This means that O(number of files) GCs are unavoidable.
294 # into. This means that O(number of files) GCs are unavoidable.
301 # Depending on when in the process's lifetime the dirstate is parsed,
295 # Depending on when in the process's lifetime the dirstate is parsed,
302 # this can get very expensive. As a workaround, disable GC while
296 # this can get very expensive. As a workaround, disable GC while
303 # parsing the dirstate.
297 # parsing the dirstate.
304 gcenabled = gc.isenabled()
298 gcenabled = gc.isenabled()
305 gc.disable()
299 gc.disable()
306 try:
300 try:
307 p = parsers.parse_dirstate(self._map, self._copymap, st)
301 p = parsers.parse_dirstate(self._map, self._copymap, st)
308 finally:
302 finally:
309 if gcenabled:
303 if gcenabled:
310 gc.enable()
304 gc.enable()
311 if not self._dirtypl:
305 if not self._dirtypl:
312 self._pl = p
306 self._pl = p
313
307
314 def invalidate(self):
308 def invalidate(self):
315 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
309 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
316 "_ignore"):
310 "_ignore"):
317 if a in self.__dict__:
311 if a in self.__dict__:
318 delattr(self, a)
312 delattr(self, a)
319 self._lastnormaltime = 0
313 self._lastnormaltime = 0
320 self._dirty = False
314 self._dirty = False
321
315
322 def copy(self, source, dest):
316 def copy(self, source, dest):
323 """Mark dest as a copy of source. Unmark dest if source is None."""
317 """Mark dest as a copy of source. Unmark dest if source is None."""
324 if source == dest:
318 if source == dest:
325 return
319 return
326 self._dirty = True
320 self._dirty = True
327 if source is not None:
321 if source is not None:
328 self._copymap[dest] = source
322 self._copymap[dest] = source
329 elif dest in self._copymap:
323 elif dest in self._copymap:
330 del self._copymap[dest]
324 del self._copymap[dest]
331
325
332 def copied(self, file):
326 def copied(self, file):
333 return self._copymap.get(file, None)
327 return self._copymap.get(file, None)
334
328
335 def copies(self):
329 def copies(self):
336 return self._copymap
330 return self._copymap
337
331
338 def _droppath(self, f):
332 def _droppath(self, f):
339 if self[f] not in "?r" and "_dirs" in self.__dict__:
333 if self[f] not in "?r" and "_dirs" in self.__dict__:
340 _decdirs(self._dirs, f)
334 _decdirs(self._dirs, f)
341
335
342 def _addpath(self, f, state, mode, size, mtime):
336 def _addpath(self, f, state, mode, size, mtime):
343 oldstate = self[f]
337 oldstate = self[f]
344 if state == 'a' or oldstate == 'r':
338 if state == 'a' or oldstate == 'r':
345 scmutil.checkfilename(f)
339 scmutil.checkfilename(f)
346 if f in self._dirs:
340 if f in self._dirs:
347 raise util.Abort(_('directory %r already in dirstate') % f)
341 raise util.Abort(_('directory %r already in dirstate') % f)
348 # shadows
342 # shadows
349 for d in _finddirs(f):
343 for d in scmutil.finddirs(f):
350 if d in self._dirs:
344 if d in self._dirs:
351 break
345 break
352 if d in self._map and self[d] != 'r':
346 if d in self._map and self[d] != 'r':
353 raise util.Abort(
347 raise util.Abort(
354 _('file %r in dirstate clashes with %r') % (d, f))
348 _('file %r in dirstate clashes with %r') % (d, f))
355 if oldstate in "?r" and "_dirs" in self.__dict__:
349 if oldstate in "?r" and "_dirs" in self.__dict__:
356 _incdirs(self._dirs, f)
350 _incdirs(self._dirs, f)
357 self._dirty = True
351 self._dirty = True
358 self._map[f] = (state, mode, size, mtime)
352 self._map[f] = (state, mode, size, mtime)
359
353
360 def normal(self, f):
354 def normal(self, f):
361 '''Mark a file normal and clean.'''
355 '''Mark a file normal and clean.'''
362 s = os.lstat(self._join(f))
356 s = os.lstat(self._join(f))
363 mtime = int(s.st_mtime)
357 mtime = int(s.st_mtime)
364 self._addpath(f, 'n', s.st_mode,
358 self._addpath(f, 'n', s.st_mode,
365 s.st_size & _rangemask, mtime & _rangemask)
359 s.st_size & _rangemask, mtime & _rangemask)
366 if f in self._copymap:
360 if f in self._copymap:
367 del self._copymap[f]
361 del self._copymap[f]
368 if mtime > self._lastnormaltime:
362 if mtime > self._lastnormaltime:
369 # Remember the most recent modification timeslot for status(),
363 # Remember the most recent modification timeslot for status(),
370 # to make sure we won't miss future size-preserving file content
364 # to make sure we won't miss future size-preserving file content
371 # modifications that happen within the same timeslot.
365 # modifications that happen within the same timeslot.
372 self._lastnormaltime = mtime
366 self._lastnormaltime = mtime
373
367
374 def normallookup(self, f):
368 def normallookup(self, f):
375 '''Mark a file normal, but possibly dirty.'''
369 '''Mark a file normal, but possibly dirty.'''
376 if self._pl[1] != nullid and f in self._map:
370 if self._pl[1] != nullid and f in self._map:
377 # if there is a merge going on and the file was either
371 # if there is a merge going on and the file was either
378 # in state 'm' (-1) or coming from other parent (-2) before
372 # in state 'm' (-1) or coming from other parent (-2) before
379 # being removed, restore that state.
373 # being removed, restore that state.
380 entry = self._map[f]
374 entry = self._map[f]
381 if entry[0] == 'r' and entry[2] in (-1, -2):
375 if entry[0] == 'r' and entry[2] in (-1, -2):
382 source = self._copymap.get(f)
376 source = self._copymap.get(f)
383 if entry[2] == -1:
377 if entry[2] == -1:
384 self.merge(f)
378 self.merge(f)
385 elif entry[2] == -2:
379 elif entry[2] == -2:
386 self.otherparent(f)
380 self.otherparent(f)
387 if source:
381 if source:
388 self.copy(source, f)
382 self.copy(source, f)
389 return
383 return
390 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
384 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
391 return
385 return
392 self._addpath(f, 'n', 0, -1, -1)
386 self._addpath(f, 'n', 0, -1, -1)
393 if f in self._copymap:
387 if f in self._copymap:
394 del self._copymap[f]
388 del self._copymap[f]
395
389
396 def otherparent(self, f):
390 def otherparent(self, f):
397 '''Mark as coming from the other parent, always dirty.'''
391 '''Mark as coming from the other parent, always dirty.'''
398 if self._pl[1] == nullid:
392 if self._pl[1] == nullid:
399 raise util.Abort(_("setting %r to other parent "
393 raise util.Abort(_("setting %r to other parent "
400 "only allowed in merges") % f)
394 "only allowed in merges") % f)
401 self._addpath(f, 'n', 0, -2, -1)
395 self._addpath(f, 'n', 0, -2, -1)
402 if f in self._copymap:
396 if f in self._copymap:
403 del self._copymap[f]
397 del self._copymap[f]
404
398
405 def add(self, f):
399 def add(self, f):
406 '''Mark a file added.'''
400 '''Mark a file added.'''
407 self._addpath(f, 'a', 0, -1, -1)
401 self._addpath(f, 'a', 0, -1, -1)
408 if f in self._copymap:
402 if f in self._copymap:
409 del self._copymap[f]
403 del self._copymap[f]
410
404
411 def remove(self, f):
405 def remove(self, f):
412 '''Mark a file removed.'''
406 '''Mark a file removed.'''
413 self._dirty = True
407 self._dirty = True
414 self._droppath(f)
408 self._droppath(f)
415 size = 0
409 size = 0
416 if self._pl[1] != nullid and f in self._map:
410 if self._pl[1] != nullid and f in self._map:
417 # backup the previous state
411 # backup the previous state
418 entry = self._map[f]
412 entry = self._map[f]
419 if entry[0] == 'm': # merge
413 if entry[0] == 'm': # merge
420 size = -1
414 size = -1
421 elif entry[0] == 'n' and entry[2] == -2: # other parent
415 elif entry[0] == 'n' and entry[2] == -2: # other parent
422 size = -2
416 size = -2
423 self._map[f] = ('r', 0, size, 0)
417 self._map[f] = ('r', 0, size, 0)
424 if size == 0 and f in self._copymap:
418 if size == 0 and f in self._copymap:
425 del self._copymap[f]
419 del self._copymap[f]
426
420
427 def merge(self, f):
421 def merge(self, f):
428 '''Mark a file merged.'''
422 '''Mark a file merged.'''
429 if self._pl[1] == nullid:
423 if self._pl[1] == nullid:
430 return self.normallookup(f)
424 return self.normallookup(f)
431 s = os.lstat(self._join(f))
425 s = os.lstat(self._join(f))
432 self._addpath(f, 'm', s.st_mode,
426 self._addpath(f, 'm', s.st_mode,
433 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
427 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
434 if f in self._copymap:
428 if f in self._copymap:
435 del self._copymap[f]
429 del self._copymap[f]
436
430
437 def drop(self, f):
431 def drop(self, f):
438 '''Drop a file from the dirstate'''
432 '''Drop a file from the dirstate'''
439 if f in self._map:
433 if f in self._map:
440 self._dirty = True
434 self._dirty = True
441 self._droppath(f)
435 self._droppath(f)
442 del self._map[f]
436 del self._map[f]
443
437
444 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
438 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
445 normed = util.normcase(path)
439 normed = util.normcase(path)
446 folded = self._foldmap.get(normed, None)
440 folded = self._foldmap.get(normed, None)
447 if folded is None:
441 if folded is None:
448 if isknown:
442 if isknown:
449 folded = path
443 folded = path
450 else:
444 else:
451 if exists is None:
445 if exists is None:
452 exists = os.path.lexists(os.path.join(self._root, path))
446 exists = os.path.lexists(os.path.join(self._root, path))
453 if not exists:
447 if not exists:
454 # Maybe a path component exists
448 # Maybe a path component exists
455 if not ignoremissing and '/' in path:
449 if not ignoremissing and '/' in path:
456 d, f = path.rsplit('/', 1)
450 d, f = path.rsplit('/', 1)
457 d = self._normalize(d, isknown, ignoremissing, None)
451 d = self._normalize(d, isknown, ignoremissing, None)
458 folded = d + "/" + f
452 folded = d + "/" + f
459 else:
453 else:
460 # No path components, preserve original case
454 # No path components, preserve original case
461 folded = path
455 folded = path
462 else:
456 else:
463 # recursively normalize leading directory components
457 # recursively normalize leading directory components
464 # against dirstate
458 # against dirstate
465 if '/' in normed:
459 if '/' in normed:
466 d, f = normed.rsplit('/', 1)
460 d, f = normed.rsplit('/', 1)
467 d = self._normalize(d, isknown, ignoremissing, True)
461 d = self._normalize(d, isknown, ignoremissing, True)
468 r = self._root + "/" + d
462 r = self._root + "/" + d
469 folded = d + "/" + util.fspath(f, r)
463 folded = d + "/" + util.fspath(f, r)
470 else:
464 else:
471 folded = util.fspath(normed, self._root)
465 folded = util.fspath(normed, self._root)
472 self._foldmap[normed] = folded
466 self._foldmap[normed] = folded
473
467
474 return folded
468 return folded
475
469
476 def normalize(self, path, isknown=False, ignoremissing=False):
470 def normalize(self, path, isknown=False, ignoremissing=False):
477 '''
471 '''
478 normalize the case of a pathname when on a casefolding filesystem
472 normalize the case of a pathname when on a casefolding filesystem
479
473
480 isknown specifies whether the filename came from walking the
474 isknown specifies whether the filename came from walking the
481 disk, to avoid extra filesystem access.
475 disk, to avoid extra filesystem access.
482
476
483 If ignoremissing is True, missing path are returned
477 If ignoremissing is True, missing path are returned
484 unchanged. Otherwise, we try harder to normalize possibly
478 unchanged. Otherwise, we try harder to normalize possibly
485 existing path components.
479 existing path components.
486
480
487 The normalized case is determined based on the following precedence:
481 The normalized case is determined based on the following precedence:
488
482
489 - version of name already stored in the dirstate
483 - version of name already stored in the dirstate
490 - version of name stored on disk
484 - version of name stored on disk
491 - version provided via command arguments
485 - version provided via command arguments
492 '''
486 '''
493
487
494 if self._checkcase:
488 if self._checkcase:
495 return self._normalize(path, isknown, ignoremissing)
489 return self._normalize(path, isknown, ignoremissing)
496 return path
490 return path
497
491
498 def clear(self):
492 def clear(self):
499 self._map = {}
493 self._map = {}
500 if "_dirs" in self.__dict__:
494 if "_dirs" in self.__dict__:
501 delattr(self, "_dirs")
495 delattr(self, "_dirs")
502 self._copymap = {}
496 self._copymap = {}
503 self._pl = [nullid, nullid]
497 self._pl = [nullid, nullid]
504 self._lastnormaltime = 0
498 self._lastnormaltime = 0
505 self._dirty = True
499 self._dirty = True
506
500
507 def rebuild(self, parent, allfiles, changedfiles=None):
501 def rebuild(self, parent, allfiles, changedfiles=None):
508 changedfiles = changedfiles or allfiles
502 changedfiles = changedfiles or allfiles
509 oldmap = self._map
503 oldmap = self._map
510 self.clear()
504 self.clear()
511 for f in allfiles:
505 for f in allfiles:
512 if f not in changedfiles:
506 if f not in changedfiles:
513 self._map[f] = oldmap[f]
507 self._map[f] = oldmap[f]
514 else:
508 else:
515 if 'x' in allfiles.flags(f):
509 if 'x' in allfiles.flags(f):
516 self._map[f] = ('n', 0777, -1, 0)
510 self._map[f] = ('n', 0777, -1, 0)
517 else:
511 else:
518 self._map[f] = ('n', 0666, -1, 0)
512 self._map[f] = ('n', 0666, -1, 0)
519 self._pl = (parent, nullid)
513 self._pl = (parent, nullid)
520 self._dirty = True
514 self._dirty = True
521
515
522 def write(self):
516 def write(self):
523 if not self._dirty:
517 if not self._dirty:
524 return
518 return
525 st = self._opener("dirstate", "w", atomictemp=True)
519 st = self._opener("dirstate", "w", atomictemp=True)
526
520
527 def finish(s):
521 def finish(s):
528 st.write(s)
522 st.write(s)
529 st.close()
523 st.close()
530 self._lastnormaltime = 0
524 self._lastnormaltime = 0
531 self._dirty = self._dirtypl = False
525 self._dirty = self._dirtypl = False
532
526
533 # use the modification time of the newly created temporary file as the
527 # use the modification time of the newly created temporary file as the
534 # filesystem's notion of 'now'
528 # filesystem's notion of 'now'
535 now = util.fstat(st).st_mtime
529 now = util.fstat(st).st_mtime
536 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
530 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
537
531
538 def _dirignore(self, f):
532 def _dirignore(self, f):
539 if f == '.':
533 if f == '.':
540 return False
534 return False
541 if self._ignore(f):
535 if self._ignore(f):
542 return True
536 return True
543 for p in _finddirs(f):
537 for p in scmutil.finddirs(f):
544 if self._ignore(p):
538 if self._ignore(p):
545 return True
539 return True
546 return False
540 return False
547
541
548 def walk(self, match, subrepos, unknown, ignored):
542 def walk(self, match, subrepos, unknown, ignored):
549 '''
543 '''
550 Walk recursively through the directory tree, finding all files
544 Walk recursively through the directory tree, finding all files
551 matched by match.
545 matched by match.
552
546
553 Return a dict mapping filename to stat-like object (either
547 Return a dict mapping filename to stat-like object (either
554 mercurial.osutil.stat instance or return value of os.stat()).
548 mercurial.osutil.stat instance or return value of os.stat()).
555 '''
549 '''
556
550
557 def fwarn(f, msg):
551 def fwarn(f, msg):
558 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
552 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
559 return False
553 return False
560
554
561 def badtype(mode):
555 def badtype(mode):
562 kind = _('unknown')
556 kind = _('unknown')
563 if stat.S_ISCHR(mode):
557 if stat.S_ISCHR(mode):
564 kind = _('character device')
558 kind = _('character device')
565 elif stat.S_ISBLK(mode):
559 elif stat.S_ISBLK(mode):
566 kind = _('block device')
560 kind = _('block device')
567 elif stat.S_ISFIFO(mode):
561 elif stat.S_ISFIFO(mode):
568 kind = _('fifo')
562 kind = _('fifo')
569 elif stat.S_ISSOCK(mode):
563 elif stat.S_ISSOCK(mode):
570 kind = _('socket')
564 kind = _('socket')
571 elif stat.S_ISDIR(mode):
565 elif stat.S_ISDIR(mode):
572 kind = _('directory')
566 kind = _('directory')
573 return _('unsupported file type (type is %s)') % kind
567 return _('unsupported file type (type is %s)') % kind
574
568
575 ignore = self._ignore
569 ignore = self._ignore
576 dirignore = self._dirignore
570 dirignore = self._dirignore
577 if ignored:
571 if ignored:
578 ignore = util.never
572 ignore = util.never
579 dirignore = util.never
573 dirignore = util.never
580 elif not unknown:
574 elif not unknown:
581 # if unknown and ignored are False, skip step 2
575 # if unknown and ignored are False, skip step 2
582 ignore = util.always
576 ignore = util.always
583 dirignore = util.always
577 dirignore = util.always
584
578
585 matchfn = match.matchfn
579 matchfn = match.matchfn
586 matchalways = match.always()
580 matchalways = match.always()
587 badfn = match.bad
581 badfn = match.bad
588 dmap = self._map
582 dmap = self._map
589 normpath = util.normpath
583 normpath = util.normpath
590 listdir = osutil.listdir
584 listdir = osutil.listdir
591 lstat = os.lstat
585 lstat = os.lstat
592 getkind = stat.S_IFMT
586 getkind = stat.S_IFMT
593 dirkind = stat.S_IFDIR
587 dirkind = stat.S_IFDIR
594 regkind = stat.S_IFREG
588 regkind = stat.S_IFREG
595 lnkkind = stat.S_IFLNK
589 lnkkind = stat.S_IFLNK
596 join = self._join
590 join = self._join
597 work = []
591 work = []
598 wadd = work.append
592 wadd = work.append
599
593
600 exact = skipstep3 = False
594 exact = skipstep3 = False
601 if matchfn == match.exact: # match.exact
595 if matchfn == match.exact: # match.exact
602 exact = True
596 exact = True
603 dirignore = util.always # skip step 2
597 dirignore = util.always # skip step 2
604 elif match.files() and not match.anypats(): # match.match, no patterns
598 elif match.files() and not match.anypats(): # match.match, no patterns
605 skipstep3 = True
599 skipstep3 = True
606
600
607 if not exact and self._checkcase:
601 if not exact and self._checkcase:
608 normalize = self._normalize
602 normalize = self._normalize
609 skipstep3 = False
603 skipstep3 = False
610 else:
604 else:
611 normalize = None
605 normalize = None
612
606
613 files = sorted(match.files())
607 files = sorted(match.files())
614 subrepos.sort()
608 subrepos.sort()
615 i, j = 0, 0
609 i, j = 0, 0
616 while i < len(files) and j < len(subrepos):
610 while i < len(files) and j < len(subrepos):
617 subpath = subrepos[j] + "/"
611 subpath = subrepos[j] + "/"
618 if files[i] < subpath:
612 if files[i] < subpath:
619 i += 1
613 i += 1
620 continue
614 continue
621 while i < len(files) and files[i].startswith(subpath):
615 while i < len(files) and files[i].startswith(subpath):
622 del files[i]
616 del files[i]
623 j += 1
617 j += 1
624
618
625 if not files or '.' in files:
619 if not files or '.' in files:
626 files = ['']
620 files = ['']
627 results = dict.fromkeys(subrepos)
621 results = dict.fromkeys(subrepos)
628 results['.hg'] = None
622 results['.hg'] = None
629
623
630 # step 1: find all explicit files
624 # step 1: find all explicit files
631 for ff in files:
625 for ff in files:
632 if normalize:
626 if normalize:
633 nf = normalize(normpath(ff), False, True)
627 nf = normalize(normpath(ff), False, True)
634 else:
628 else:
635 nf = normpath(ff)
629 nf = normpath(ff)
636 if nf in results:
630 if nf in results:
637 continue
631 continue
638
632
639 try:
633 try:
640 st = lstat(join(nf))
634 st = lstat(join(nf))
641 kind = getkind(st.st_mode)
635 kind = getkind(st.st_mode)
642 if kind == dirkind:
636 if kind == dirkind:
643 skipstep3 = False
637 skipstep3 = False
644 if nf in dmap:
638 if nf in dmap:
645 #file deleted on disk but still in dirstate
639 #file deleted on disk but still in dirstate
646 results[nf] = None
640 results[nf] = None
647 match.dir(nf)
641 match.dir(nf)
648 if not dirignore(nf):
642 if not dirignore(nf):
649 wadd(nf)
643 wadd(nf)
650 elif kind == regkind or kind == lnkkind:
644 elif kind == regkind or kind == lnkkind:
651 results[nf] = st
645 results[nf] = st
652 else:
646 else:
653 badfn(ff, badtype(kind))
647 badfn(ff, badtype(kind))
654 if nf in dmap:
648 if nf in dmap:
655 results[nf] = None
649 results[nf] = None
656 except OSError, inst:
650 except OSError, inst:
657 if nf in dmap: # does it exactly match a file?
651 if nf in dmap: # does it exactly match a file?
658 results[nf] = None
652 results[nf] = None
659 else: # does it match a directory?
653 else: # does it match a directory?
660 prefix = nf + "/"
654 prefix = nf + "/"
661 for fn in dmap:
655 for fn in dmap:
662 if fn.startswith(prefix):
656 if fn.startswith(prefix):
663 match.dir(nf)
657 match.dir(nf)
664 skipstep3 = False
658 skipstep3 = False
665 break
659 break
666 else:
660 else:
667 badfn(ff, inst.strerror)
661 badfn(ff, inst.strerror)
668
662
669 # step 2: visit subdirectories
663 # step 2: visit subdirectories
670 while work:
664 while work:
671 nd = work.pop()
665 nd = work.pop()
672 skip = None
666 skip = None
673 if nd == '.':
667 if nd == '.':
674 nd = ''
668 nd = ''
675 else:
669 else:
676 skip = '.hg'
670 skip = '.hg'
677 try:
671 try:
678 entries = listdir(join(nd), stat=True, skip=skip)
672 entries = listdir(join(nd), stat=True, skip=skip)
679 except OSError, inst:
673 except OSError, inst:
680 if inst.errno in (errno.EACCES, errno.ENOENT):
674 if inst.errno in (errno.EACCES, errno.ENOENT):
681 fwarn(nd, inst.strerror)
675 fwarn(nd, inst.strerror)
682 continue
676 continue
683 raise
677 raise
684 for f, kind, st in entries:
678 for f, kind, st in entries:
685 if normalize:
679 if normalize:
686 nf = normalize(nd and (nd + "/" + f) or f, True, True)
680 nf = normalize(nd and (nd + "/" + f) or f, True, True)
687 else:
681 else:
688 nf = nd and (nd + "/" + f) or f
682 nf = nd and (nd + "/" + f) or f
689 if nf not in results:
683 if nf not in results:
690 if kind == dirkind:
684 if kind == dirkind:
691 if not ignore(nf):
685 if not ignore(nf):
692 match.dir(nf)
686 match.dir(nf)
693 wadd(nf)
687 wadd(nf)
694 if nf in dmap and (matchalways or matchfn(nf)):
688 if nf in dmap and (matchalways or matchfn(nf)):
695 results[nf] = None
689 results[nf] = None
696 elif kind == regkind or kind == lnkkind:
690 elif kind == regkind or kind == lnkkind:
697 if nf in dmap:
691 if nf in dmap:
698 if matchalways or matchfn(nf):
692 if matchalways or matchfn(nf):
699 results[nf] = st
693 results[nf] = st
700 elif (matchalways or matchfn(nf)) and not ignore(nf):
694 elif (matchalways or matchfn(nf)) and not ignore(nf):
701 results[nf] = st
695 results[nf] = st
702 elif nf in dmap and (matchalways or matchfn(nf)):
696 elif nf in dmap and (matchalways or matchfn(nf)):
703 results[nf] = None
697 results[nf] = None
704
698
705 for s in subrepos:
699 for s in subrepos:
706 del results[s]
700 del results[s]
707 del results['.hg']
701 del results['.hg']
708
702
709 # step 3: report unseen items in the dmap hash
703 # step 3: report unseen items in the dmap hash
710 if not skipstep3 and not exact:
704 if not skipstep3 and not exact:
711 if not results and matchalways:
705 if not results and matchalways:
712 visit = dmap.keys()
706 visit = dmap.keys()
713 else:
707 else:
714 visit = [f for f in dmap if f not in results and matchfn(f)]
708 visit = [f for f in dmap if f not in results and matchfn(f)]
715 visit.sort()
709 visit.sort()
716
710
717 if unknown:
711 if unknown:
718 # unknown == True means we walked the full directory tree above.
712 # unknown == True means we walked the full directory tree above.
719 # So if a file is not seen it was either a) not matching matchfn
713 # So if a file is not seen it was either a) not matching matchfn
720 # b) ignored, c) missing, or d) under a symlink directory.
714 # b) ignored, c) missing, or d) under a symlink directory.
721 audit_path = scmutil.pathauditor(self._root)
715 audit_path = scmutil.pathauditor(self._root)
722
716
723 for nf in iter(visit):
717 for nf in iter(visit):
724 # Report ignored items in the dmap as long as they are not
718 # Report ignored items in the dmap as long as they are not
725 # under a symlink directory.
719 # under a symlink directory.
726 if ignore(nf) and audit_path.check(nf):
720 if ignore(nf) and audit_path.check(nf):
727 try:
721 try:
728 results[nf] = lstat(join(nf))
722 results[nf] = lstat(join(nf))
729 except OSError:
723 except OSError:
730 # file doesn't exist
724 # file doesn't exist
731 results[nf] = None
725 results[nf] = None
732 else:
726 else:
733 # It's either missing or under a symlink directory
727 # It's either missing or under a symlink directory
734 results[nf] = None
728 results[nf] = None
735 else:
729 else:
736 # We may not have walked the full directory tree above,
730 # We may not have walked the full directory tree above,
737 # so stat everything we missed.
731 # so stat everything we missed.
738 nf = iter(visit).next
732 nf = iter(visit).next
739 for st in util.statfiles([join(i) for i in visit]):
733 for st in util.statfiles([join(i) for i in visit]):
740 results[nf()] = st
734 results[nf()] = st
741 return results
735 return results
742
736
743 def status(self, match, subrepos, ignored, clean, unknown):
737 def status(self, match, subrepos, ignored, clean, unknown):
744 '''Determine the status of the working copy relative to the
738 '''Determine the status of the working copy relative to the
745 dirstate and return a tuple of lists (unsure, modified, added,
739 dirstate and return a tuple of lists (unsure, modified, added,
746 removed, deleted, unknown, ignored, clean), where:
740 removed, deleted, unknown, ignored, clean), where:
747
741
748 unsure:
742 unsure:
749 files that might have been modified since the dirstate was
743 files that might have been modified since the dirstate was
750 written, but need to be read to be sure (size is the same
744 written, but need to be read to be sure (size is the same
751 but mtime differs)
745 but mtime differs)
752 modified:
746 modified:
753 files that have definitely been modified since the dirstate
747 files that have definitely been modified since the dirstate
754 was written (different size or mode)
748 was written (different size or mode)
755 added:
749 added:
756 files that have been explicitly added with hg add
750 files that have been explicitly added with hg add
757 removed:
751 removed:
758 files that have been explicitly removed with hg remove
752 files that have been explicitly removed with hg remove
759 deleted:
753 deleted:
760 files that have been deleted through other means ("missing")
754 files that have been deleted through other means ("missing")
761 unknown:
755 unknown:
762 files not in the dirstate that are not ignored
756 files not in the dirstate that are not ignored
763 ignored:
757 ignored:
764 files not in the dirstate that are ignored
758 files not in the dirstate that are ignored
765 (by _dirignore())
759 (by _dirignore())
766 clean:
760 clean:
767 files that have definitely not been modified since the
761 files that have definitely not been modified since the
768 dirstate was written
762 dirstate was written
769 '''
763 '''
770 listignored, listclean, listunknown = ignored, clean, unknown
764 listignored, listclean, listunknown = ignored, clean, unknown
771 lookup, modified, added, unknown, ignored = [], [], [], [], []
765 lookup, modified, added, unknown, ignored = [], [], [], [], []
772 removed, deleted, clean = [], [], []
766 removed, deleted, clean = [], [], []
773
767
774 dmap = self._map
768 dmap = self._map
775 ladd = lookup.append # aka "unsure"
769 ladd = lookup.append # aka "unsure"
776 madd = modified.append
770 madd = modified.append
777 aadd = added.append
771 aadd = added.append
778 uadd = unknown.append
772 uadd = unknown.append
779 iadd = ignored.append
773 iadd = ignored.append
780 radd = removed.append
774 radd = removed.append
781 dadd = deleted.append
775 dadd = deleted.append
782 cadd = clean.append
776 cadd = clean.append
783 mexact = match.exact
777 mexact = match.exact
784 dirignore = self._dirignore
778 dirignore = self._dirignore
785 checkexec = self._checkexec
779 checkexec = self._checkexec
786 checklink = self._checklink
780 checklink = self._checklink
787 copymap = self._copymap
781 copymap = self._copymap
788 lastnormaltime = self._lastnormaltime
782 lastnormaltime = self._lastnormaltime
789
783
790 lnkkind = stat.S_IFLNK
784 lnkkind = stat.S_IFLNK
791
785
792 for fn, st in self.walk(match, subrepos, listunknown,
786 for fn, st in self.walk(match, subrepos, listunknown,
793 listignored).iteritems():
787 listignored).iteritems():
794 if fn not in dmap:
788 if fn not in dmap:
795 if (listignored or mexact(fn)) and dirignore(fn):
789 if (listignored or mexact(fn)) and dirignore(fn):
796 if listignored:
790 if listignored:
797 iadd(fn)
791 iadd(fn)
798 elif listunknown:
792 elif listunknown:
799 uadd(fn)
793 uadd(fn)
800 continue
794 continue
801
795
802 state, mode, size, time = dmap[fn]
796 state, mode, size, time = dmap[fn]
803
797
804 if not st and state in "nma":
798 if not st and state in "nma":
805 dadd(fn)
799 dadd(fn)
806 elif state == 'n':
800 elif state == 'n':
807 # The "mode & lnkkind != lnkkind or self._checklink"
801 # The "mode & lnkkind != lnkkind or self._checklink"
808 # lines are an expansion of "islink => checklink"
802 # lines are an expansion of "islink => checklink"
809 # where islink means "is this a link?" and checklink
803 # where islink means "is this a link?" and checklink
810 # means "can we check links?".
804 # means "can we check links?".
811 mtime = int(st.st_mtime)
805 mtime = int(st.st_mtime)
812 if (size >= 0 and
806 if (size >= 0 and
813 ((size != st.st_size and size != st.st_size & _rangemask)
807 ((size != st.st_size and size != st.st_size & _rangemask)
814 or ((mode ^ st.st_mode) & 0100 and checkexec))
808 or ((mode ^ st.st_mode) & 0100 and checkexec))
815 and (mode & lnkkind != lnkkind or checklink)
809 and (mode & lnkkind != lnkkind or checklink)
816 or size == -2 # other parent
810 or size == -2 # other parent
817 or fn in copymap):
811 or fn in copymap):
818 madd(fn)
812 madd(fn)
819 elif ((time != mtime and time != mtime & _rangemask)
813 elif ((time != mtime and time != mtime & _rangemask)
820 and (mode & lnkkind != lnkkind or checklink)):
814 and (mode & lnkkind != lnkkind or checklink)):
821 ladd(fn)
815 ladd(fn)
822 elif mtime == lastnormaltime:
816 elif mtime == lastnormaltime:
823 # fn may have been changed in the same timeslot without
817 # fn may have been changed in the same timeslot without
824 # changing its size. This can happen if we quickly do
818 # changing its size. This can happen if we quickly do
825 # multiple commits in a single transaction.
819 # multiple commits in a single transaction.
826 # Force lookup, so we don't miss such a racy file change.
820 # Force lookup, so we don't miss such a racy file change.
827 ladd(fn)
821 ladd(fn)
828 elif listclean:
822 elif listclean:
829 cadd(fn)
823 cadd(fn)
830 elif state == 'm':
824 elif state == 'm':
831 madd(fn)
825 madd(fn)
832 elif state == 'a':
826 elif state == 'a':
833 aadd(fn)
827 aadd(fn)
834 elif state == 'r':
828 elif state == 'r':
835 radd(fn)
829 radd(fn)
836
830
837 return (lookup, modified, added, removed, deleted, unknown, ignored,
831 return (lookup, modified, added, removed, deleted, unknown, ignored,
838 clean)
832 clean)
@@ -1,892 +1,898 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, re, stat, glob
12 import os, errno, re, stat, glob
13
13
14 if os.name == 'nt':
14 if os.name == 'nt':
15 import scmwindows as scmplatform
15 import scmwindows as scmplatform
16 else:
16 else:
17 import scmposix as scmplatform
17 import scmposix as scmplatform
18
18
19 systemrcpath = scmplatform.systemrcpath
19 systemrcpath = scmplatform.systemrcpath
20 userrcpath = scmplatform.userrcpath
20 userrcpath = scmplatform.userrcpath
21
21
22 def nochangesfound(ui, repo, excluded=None):
22 def nochangesfound(ui, repo, excluded=None):
23 '''Report no changes for push/pull, excluded is None or a list of
23 '''Report no changes for push/pull, excluded is None or a list of
24 nodes excluded from the push/pull.
24 nodes excluded from the push/pull.
25 '''
25 '''
26 secretlist = []
26 secretlist = []
27 if excluded:
27 if excluded:
28 for n in excluded:
28 for n in excluded:
29 if n not in repo:
29 if n not in repo:
30 # discovery should not have included the filtered revision,
30 # discovery should not have included the filtered revision,
31 # we have to explicitly exclude it until discovery is cleanup.
31 # we have to explicitly exclude it until discovery is cleanup.
32 continue
32 continue
33 ctx = repo[n]
33 ctx = repo[n]
34 if ctx.phase() >= phases.secret and not ctx.extinct():
34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 secretlist.append(n)
35 secretlist.append(n)
36
36
37 if secretlist:
37 if secretlist:
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 % len(secretlist))
39 % len(secretlist))
40 else:
40 else:
41 ui.status(_("no changes found\n"))
41 ui.status(_("no changes found\n"))
42
42
43 def checknewlabel(repo, lbl, kind):
43 def checknewlabel(repo, lbl, kind):
44 if lbl in ['tip', '.', 'null']:
44 if lbl in ['tip', '.', 'null']:
45 raise util.Abort(_("the name '%s' is reserved") % lbl)
45 raise util.Abort(_("the name '%s' is reserved") % lbl)
46 for c in (':', '\0', '\n', '\r'):
46 for c in (':', '\0', '\n', '\r'):
47 if c in lbl:
47 if c in lbl:
48 raise util.Abort(_("%r cannot be used in a name") % c)
48 raise util.Abort(_("%r cannot be used in a name") % c)
49 try:
49 try:
50 int(lbl)
50 int(lbl)
51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
51 raise util.Abort(_("a %s cannot have an integer as its name") % kind)
52 except ValueError:
52 except ValueError:
53 pass
53 pass
54
54
55 def checkfilename(f):
55 def checkfilename(f):
56 '''Check that the filename f is an acceptable filename for a tracked file'''
56 '''Check that the filename f is an acceptable filename for a tracked file'''
57 if '\r' in f or '\n' in f:
57 if '\r' in f or '\n' in f:
58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
58 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
59
59
60 def checkportable(ui, f):
60 def checkportable(ui, f):
61 '''Check if filename f is portable and warn or abort depending on config'''
61 '''Check if filename f is portable and warn or abort depending on config'''
62 checkfilename(f)
62 checkfilename(f)
63 abort, warn = checkportabilityalert(ui)
63 abort, warn = checkportabilityalert(ui)
64 if abort or warn:
64 if abort or warn:
65 msg = util.checkwinfilename(f)
65 msg = util.checkwinfilename(f)
66 if msg:
66 if msg:
67 msg = "%s: %r" % (msg, f)
67 msg = "%s: %r" % (msg, f)
68 if abort:
68 if abort:
69 raise util.Abort(msg)
69 raise util.Abort(msg)
70 ui.warn(_("warning: %s\n") % msg)
70 ui.warn(_("warning: %s\n") % msg)
71
71
72 def checkportabilityalert(ui):
72 def checkportabilityalert(ui):
73 '''check if the user's config requests nothing, a warning, or abort for
73 '''check if the user's config requests nothing, a warning, or abort for
74 non-portable filenames'''
74 non-portable filenames'''
75 val = ui.config('ui', 'portablefilenames', 'warn')
75 val = ui.config('ui', 'portablefilenames', 'warn')
76 lval = val.lower()
76 lval = val.lower()
77 bval = util.parsebool(val)
77 bval = util.parsebool(val)
78 abort = os.name == 'nt' or lval == 'abort'
78 abort = os.name == 'nt' or lval == 'abort'
79 warn = bval or lval == 'warn'
79 warn = bval or lval == 'warn'
80 if bval is None and not (warn or abort or lval == 'ignore'):
80 if bval is None and not (warn or abort or lval == 'ignore'):
81 raise error.ConfigError(
81 raise error.ConfigError(
82 _("ui.portablefilenames value is invalid ('%s')") % val)
82 _("ui.portablefilenames value is invalid ('%s')") % val)
83 return abort, warn
83 return abort, warn
84
84
85 class casecollisionauditor(object):
85 class casecollisionauditor(object):
86 def __init__(self, ui, abort, dirstate):
86 def __init__(self, ui, abort, dirstate):
87 self._ui = ui
87 self._ui = ui
88 self._abort = abort
88 self._abort = abort
89 allfiles = '\0'.join(dirstate._map)
89 allfiles = '\0'.join(dirstate._map)
90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
90 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
91 self._dirstate = dirstate
91 self._dirstate = dirstate
92 # The purpose of _newfiles is so that we don't complain about
92 # The purpose of _newfiles is so that we don't complain about
93 # case collisions if someone were to call this object with the
93 # case collisions if someone were to call this object with the
94 # same filename twice.
94 # same filename twice.
95 self._newfiles = set()
95 self._newfiles = set()
96
96
97 def __call__(self, f):
97 def __call__(self, f):
98 fl = encoding.lower(f)
98 fl = encoding.lower(f)
99 if (fl in self._loweredfiles and f not in self._dirstate and
99 if (fl in self._loweredfiles and f not in self._dirstate and
100 f not in self._newfiles):
100 f not in self._newfiles):
101 msg = _('possible case-folding collision for %s') % f
101 msg = _('possible case-folding collision for %s') % f
102 if self._abort:
102 if self._abort:
103 raise util.Abort(msg)
103 raise util.Abort(msg)
104 self._ui.warn(_("warning: %s\n") % msg)
104 self._ui.warn(_("warning: %s\n") % msg)
105 self._loweredfiles.add(fl)
105 self._loweredfiles.add(fl)
106 self._newfiles.add(f)
106 self._newfiles.add(f)
107
107
108 class pathauditor(object):
108 class pathauditor(object):
109 '''ensure that a filesystem path contains no banned components.
109 '''ensure that a filesystem path contains no banned components.
110 the following properties of a path are checked:
110 the following properties of a path are checked:
111
111
112 - ends with a directory separator
112 - ends with a directory separator
113 - under top-level .hg
113 - under top-level .hg
114 - starts at the root of a windows drive
114 - starts at the root of a windows drive
115 - contains ".."
115 - contains ".."
116 - traverses a symlink (e.g. a/symlink_here/b)
116 - traverses a symlink (e.g. a/symlink_here/b)
117 - inside a nested repository (a callback can be used to approve
117 - inside a nested repository (a callback can be used to approve
118 some nested repositories, e.g., subrepositories)
118 some nested repositories, e.g., subrepositories)
119 '''
119 '''
120
120
121 def __init__(self, root, callback=None):
121 def __init__(self, root, callback=None):
122 self.audited = set()
122 self.audited = set()
123 self.auditeddir = set()
123 self.auditeddir = set()
124 self.root = root
124 self.root = root
125 self.callback = callback
125 self.callback = callback
126 if os.path.lexists(root) and not util.checkcase(root):
126 if os.path.lexists(root) and not util.checkcase(root):
127 self.normcase = util.normcase
127 self.normcase = util.normcase
128 else:
128 else:
129 self.normcase = lambda x: x
129 self.normcase = lambda x: x
130
130
131 def __call__(self, path):
131 def __call__(self, path):
132 '''Check the relative path.
132 '''Check the relative path.
133 path may contain a pattern (e.g. foodir/**.txt)'''
133 path may contain a pattern (e.g. foodir/**.txt)'''
134
134
135 path = util.localpath(path)
135 path = util.localpath(path)
136 normpath = self.normcase(path)
136 normpath = self.normcase(path)
137 if normpath in self.audited:
137 if normpath in self.audited:
138 return
138 return
139 # AIX ignores "/" at end of path, others raise EISDIR.
139 # AIX ignores "/" at end of path, others raise EISDIR.
140 if util.endswithsep(path):
140 if util.endswithsep(path):
141 raise util.Abort(_("path ends in directory separator: %s") % path)
141 raise util.Abort(_("path ends in directory separator: %s") % path)
142 parts = util.splitpath(path)
142 parts = util.splitpath(path)
143 if (os.path.splitdrive(path)[0]
143 if (os.path.splitdrive(path)[0]
144 or parts[0].lower() in ('.hg', '.hg.', '')
144 or parts[0].lower() in ('.hg', '.hg.', '')
145 or os.pardir in parts):
145 or os.pardir in parts):
146 raise util.Abort(_("path contains illegal component: %s") % path)
146 raise util.Abort(_("path contains illegal component: %s") % path)
147 if '.hg' in path.lower():
147 if '.hg' in path.lower():
148 lparts = [p.lower() for p in parts]
148 lparts = [p.lower() for p in parts]
149 for p in '.hg', '.hg.':
149 for p in '.hg', '.hg.':
150 if p in lparts[1:]:
150 if p in lparts[1:]:
151 pos = lparts.index(p)
151 pos = lparts.index(p)
152 base = os.path.join(*parts[:pos])
152 base = os.path.join(*parts[:pos])
153 raise util.Abort(_("path '%s' is inside nested repo %r")
153 raise util.Abort(_("path '%s' is inside nested repo %r")
154 % (path, base))
154 % (path, base))
155
155
156 normparts = util.splitpath(normpath)
156 normparts = util.splitpath(normpath)
157 assert len(parts) == len(normparts)
157 assert len(parts) == len(normparts)
158
158
159 parts.pop()
159 parts.pop()
160 normparts.pop()
160 normparts.pop()
161 prefixes = []
161 prefixes = []
162 while parts:
162 while parts:
163 prefix = os.sep.join(parts)
163 prefix = os.sep.join(parts)
164 normprefix = os.sep.join(normparts)
164 normprefix = os.sep.join(normparts)
165 if normprefix in self.auditeddir:
165 if normprefix in self.auditeddir:
166 break
166 break
167 curpath = os.path.join(self.root, prefix)
167 curpath = os.path.join(self.root, prefix)
168 try:
168 try:
169 st = os.lstat(curpath)
169 st = os.lstat(curpath)
170 except OSError, err:
170 except OSError, err:
171 # EINVAL can be raised as invalid path syntax under win32.
171 # EINVAL can be raised as invalid path syntax under win32.
172 # They must be ignored for patterns can be checked too.
172 # They must be ignored for patterns can be checked too.
173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
173 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
174 raise
174 raise
175 else:
175 else:
176 if stat.S_ISLNK(st.st_mode):
176 if stat.S_ISLNK(st.st_mode):
177 raise util.Abort(
177 raise util.Abort(
178 _('path %r traverses symbolic link %r')
178 _('path %r traverses symbolic link %r')
179 % (path, prefix))
179 % (path, prefix))
180 elif (stat.S_ISDIR(st.st_mode) and
180 elif (stat.S_ISDIR(st.st_mode) and
181 os.path.isdir(os.path.join(curpath, '.hg'))):
181 os.path.isdir(os.path.join(curpath, '.hg'))):
182 if not self.callback or not self.callback(curpath):
182 if not self.callback or not self.callback(curpath):
183 raise util.Abort(_("path '%s' is inside nested "
183 raise util.Abort(_("path '%s' is inside nested "
184 "repo %r")
184 "repo %r")
185 % (path, prefix))
185 % (path, prefix))
186 prefixes.append(normprefix)
186 prefixes.append(normprefix)
187 parts.pop()
187 parts.pop()
188 normparts.pop()
188 normparts.pop()
189
189
190 self.audited.add(normpath)
190 self.audited.add(normpath)
191 # only add prefixes to the cache after checking everything: we don't
191 # only add prefixes to the cache after checking everything: we don't
192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
192 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
193 self.auditeddir.update(prefixes)
193 self.auditeddir.update(prefixes)
194
194
195 def check(self, path):
195 def check(self, path):
196 try:
196 try:
197 self(path)
197 self(path)
198 return True
198 return True
199 except (OSError, util.Abort):
199 except (OSError, util.Abort):
200 return False
200 return False
201
201
202 class abstractvfs(object):
202 class abstractvfs(object):
203 """Abstract base class; cannot be instantiated"""
203 """Abstract base class; cannot be instantiated"""
204
204
205 def __init__(self, *args, **kwargs):
205 def __init__(self, *args, **kwargs):
206 '''Prevent instantiation; don't call this from subclasses.'''
206 '''Prevent instantiation; don't call this from subclasses.'''
207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
207 raise NotImplementedError('attempted instantiating ' + str(type(self)))
208
208
209 def tryread(self, path):
209 def tryread(self, path):
210 '''gracefully return an empty string for missing files'''
210 '''gracefully return an empty string for missing files'''
211 try:
211 try:
212 return self.read(path)
212 return self.read(path)
213 except IOError, inst:
213 except IOError, inst:
214 if inst.errno != errno.ENOENT:
214 if inst.errno != errno.ENOENT:
215 raise
215 raise
216 return ""
216 return ""
217
217
218 def read(self, path):
218 def read(self, path):
219 fp = self(path, 'rb')
219 fp = self(path, 'rb')
220 try:
220 try:
221 return fp.read()
221 return fp.read()
222 finally:
222 finally:
223 fp.close()
223 fp.close()
224
224
225 def write(self, path, data):
225 def write(self, path, data):
226 fp = self(path, 'wb')
226 fp = self(path, 'wb')
227 try:
227 try:
228 return fp.write(data)
228 return fp.write(data)
229 finally:
229 finally:
230 fp.close()
230 fp.close()
231
231
232 def append(self, path, data):
232 def append(self, path, data):
233 fp = self(path, 'ab')
233 fp = self(path, 'ab')
234 try:
234 try:
235 return fp.write(data)
235 return fp.write(data)
236 finally:
236 finally:
237 fp.close()
237 fp.close()
238
238
239 def exists(self, path=None):
239 def exists(self, path=None):
240 return os.path.exists(self.join(path))
240 return os.path.exists(self.join(path))
241
241
242 def isdir(self, path=None):
242 def isdir(self, path=None):
243 return os.path.isdir(self.join(path))
243 return os.path.isdir(self.join(path))
244
244
245 def makedir(self, path=None, notindexed=True):
245 def makedir(self, path=None, notindexed=True):
246 return util.makedir(self.join(path), notindexed)
246 return util.makedir(self.join(path), notindexed)
247
247
248 def makedirs(self, path=None, mode=None):
248 def makedirs(self, path=None, mode=None):
249 return util.makedirs(self.join(path), mode)
249 return util.makedirs(self.join(path), mode)
250
250
251 def mkdir(self, path=None):
251 def mkdir(self, path=None):
252 return os.mkdir(self.join(path))
252 return os.mkdir(self.join(path))
253
253
254 def readdir(self, path=None, stat=None, skip=None):
254 def readdir(self, path=None, stat=None, skip=None):
255 return osutil.listdir(self.join(path), stat, skip)
255 return osutil.listdir(self.join(path), stat, skip)
256
256
257 def stat(self, path=None):
257 def stat(self, path=None):
258 return os.stat(self.join(path))
258 return os.stat(self.join(path))
259
259
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expand=False):
        # expand=True resolves ~user/$VAR in base and canonicalizes symlinks
        if expand:
            base = os.path.realpath(util.expandpath(base))
        self.base = base
        self._setmustaudit(audit)
        # mode bits applied to newly created files (None: leave to umask)
        self.createmode = None
        # lazily determined: whether st_nlink can be trusted on this fs
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathauditor(self.base)
        else:
            # auditing disabled: accept any path unconditionally
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem holding self.base supports symlinks
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem honors the executable bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply self.createmode (if any) to a freshly created file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        """Open ``path`` (relative to the base) and return a file object.

        When opening for write, parent directories are created as needed
        and hardlinked files are copied first so that data shared through
        the link is not modified in place (see issue1922).
        """
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink before writing
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        # create dst as a symlink to src; falls back to writing a regular
        # file containing the link target when symlinks are unsupported
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        # absolute path for ``path``; the base directory itself when empty
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
370
370
# legacy alias: historical callers refer to the class as 'opener'
opener = vfs
372
372
class auditvfs(object):
    """Base for vfs wrappers: forwards ``mustaudit`` to the wrapped vfs."""

    def __init__(self, vfs):
        # the wrapped vfs instance all operations delegate to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
384
384
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable mapping a caller-supplied path to the stored path
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # open via the wrapped vfs under the filtered name
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        # empty paths are passed through unfiltered, like the plain vfs
        if path:
            path = self._filter(path)
        return self.vfs.join(path)
400
400
# legacy alias kept for backwards compatibility
filteropener = filtervfs
402
402
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through this wrapper
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
413
413
414
414
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root

    The result is relative to ``root``, uses '/' separators, and has
    been vetted by the path auditor.  Raises util.Abort when ``myname``
    lies outside the ``root`` hierarchy.
    '''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        # interpret relative names with respect to cwd inside root
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name is lexically inside root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). The list
        # `rel' holds the reversed list of components making up the relative
        # file name we want.
        rel = []
        while True:
            try:
                # samefile follows symlinks, catching aliased roots
                s = util.samefile(name, root)
            except OSError:
                s = False
            if s:
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = util.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without matching
                break
            name = dirname

        raise util.Abort(_("%s not under root '%s'") % (myname, root))
460
460
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate walk errors for the top-level path itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat if unseen; returns True when it is new.
            # Used to avoid revisiting directories through symlink cycles.
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so do not follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target manually; os.walk won't
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
508
508
def osrcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries first, then per-user ones, all normalized
    paths = systemrcpath()
    paths.extend(userrcpath())
    return [os.path.normpath(p) for p in paths]
515
515
# cached result of rcpath(); computed lazily on first call
_rcpath = None
517
517
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # result is computed once and cached in the module-level _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    # skip empty entries (e.g. trailing separators)
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
541
541
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to one changectx (the last match).

    Falls back to ``repo[default]`` when ``revspec`` is empty; aborts
    when the spec matches no revision at all.
    """
    if not revspec:
        return repo[default]

    revisions = revrange(repo, [revspec])
    if not revisions:
        raise util.Abort(_('empty revision set'))
    return repo[revisions[-1]]
550
550
def revpair(repo, revs):
    """Resolve revision specs to a (first, second) pair of nodes.

    Returns (working dir parent, None) when ``revs`` is empty, and
    (node, None) when the specs name exactly one revision without a
    range separator.  Raises util.Abort when the specs match nothing.
    """
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        # revs is non-empty here (guarded above), so an empty result
        # always means the range matched nothing.  The original code
        # carried an unreachable fallback return after this raise.
        raise util.Abort(_('empty revision range'))

    if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        # single spec without ':' -> single-revision form
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])
566
566
# separator used in old-style revision ranges like "1:5"
_revrangesep = ':'
568
568
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # map an empty spec component to defval (e.g. ":" -> full range)
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # `l` is the ordered result; `seen` mirrors it as a set for O(1)
    # dedup.  Building `seen` is deferred in the common single-range
    # case and synced at the top of the next iteration.
    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = list(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                # preserve the requested direction of the range
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, list(repo)) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l
626
626
def expandpats(pats):
    '''expand bare glob patterns into the files they match

    Patterns carrying an explicit kind prefix are passed through
    untouched; a bare pattern that matches nothing is also kept as-is.
    '''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            # explicitly-kinded pattern: never glob-expanded
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(pat)
    return expanded
643
643
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a (match object, expanded pats) pair for ``ctx``.

    Bad files are reported as warnings on the repo ui rather than
    raising.  Note: the mutable defaults are never mutated here.
    '''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        # warn instead of aborting when a named file is bad
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats
656
656
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Like matchandpats() but return only the match object.'''
    return matchandpats(ctx, pats, opts, globbed, default)[0]
659
659
def matchall(repo):
    '''Return a matcher that matches every file in the repo.'''
    return matchmod.always(repo.root, repo.getcwd())
662
662
def matchfiles(repo, files):
    '''Return a matcher that matches exactly the given file list.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
665
665
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    """Schedule missing files for removal and unknown files for addition.

    When ``similarity`` > 0, additionally record add/remove pairs whose
    content similarity exceeds the threshold as renames.  Returns 1 if
    any explicitly named pattern was rejected, 0 otherwise.
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(m, sorted(ctx.substate), True, False)
    # classify every walked file by its dirstate status character
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        # for finding renames
        elif dstate == 'r':
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    copies = {}
    if similarity > 0:
        # guess renames by content similarity between drops and adds
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()

    for f in rejected:
        if f in m.files():
            # an explicitly requested file was bad: signal failure
            return 1
    return 0
730
730
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain back to its origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added in the working dir: no copy data
            # can be recorded against an uncommitted file
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
749
749
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        if not r or not r[0].isalnum():
            # a requirement must start with an alphanumeric character
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(sorted(missings)))
    return requirements
766
766
767 class filecacheentry(object):
767 class filecacheentry(object):
768 def __init__(self, path, stat=True):
768 def __init__(self, path, stat=True):
769 self.path = path
769 self.path = path
770 self.cachestat = None
770 self.cachestat = None
771 self._cacheable = None
771 self._cacheable = None
772
772
773 if stat:
773 if stat:
774 self.cachestat = filecacheentry.stat(self.path)
774 self.cachestat = filecacheentry.stat(self.path)
775
775
776 if self.cachestat:
776 if self.cachestat:
777 self._cacheable = self.cachestat.cacheable()
777 self._cacheable = self.cachestat.cacheable()
778 else:
778 else:
779 # None means we don't know yet
779 # None means we don't know yet
780 self._cacheable = None
780 self._cacheable = None
781
781
782 def refresh(self):
782 def refresh(self):
783 if self.cacheable():
783 if self.cacheable():
784 self.cachestat = filecacheentry.stat(self.path)
784 self.cachestat = filecacheentry.stat(self.path)
785
785
786 def cacheable(self):
786 def cacheable(self):
787 if self._cacheable is not None:
787 if self._cacheable is not None:
788 return self._cacheable
788 return self._cacheable
789
789
790 # we don't know yet, assume it is for now
790 # we don't know yet, assume it is for now
791 return True
791 return True
792
792
793 def changed(self):
793 def changed(self):
794 # no point in going further if we can't cache it
794 # no point in going further if we can't cache it
795 if not self.cacheable():
795 if not self.cacheable():
796 return True
796 return True
797
797
798 newstat = filecacheentry.stat(self.path)
798 newstat = filecacheentry.stat(self.path)
799
799
800 # we may not know if it's cacheable yet, check again now
800 # we may not know if it's cacheable yet, check again now
801 if newstat and self._cacheable is None:
801 if newstat and self._cacheable is None:
802 self._cacheable = newstat.cacheable()
802 self._cacheable = newstat.cacheable()
803
803
804 # check again
804 # check again
805 if not self._cacheable:
805 if not self._cacheable:
806 return True
806 return True
807
807
808 if self.cachestat != newstat:
808 if self.cachestat != newstat:
809 self.cachestat = newstat
809 self.cachestat = newstat
810 return True
810 return True
811 else:
811 else:
812 return False
812 return False
813
813
814 @staticmethod
814 @staticmethod
815 def stat(path):
815 def stat(path):
816 try:
816 try:
817 return util.cachestat(path)
817 return util.cachestat(path)
818 except OSError, e:
818 except OSError, e:
819 if e.errno != errno.ENOENT:
819 if e.errno != errno.ENOENT:
820 raise
820 raise
821
821
822 class filecache(object):
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path):
        # Relative file name whose stat info controls cache validity.
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # Decorator application: remember the factory and its name, then
        # return self so the descriptor replaces the decorated function.
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # Fast path: the value was already materialized on the instance.
        # Invariant: presence in __dict__ implies presence in _filecache.
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        cached = obj._filecache.get(self.name)

        if not cached:
            fpath = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie
            # if a writer modified between the time we read and stat
            cached = filecacheentry(fpath)
            cached.obj = self.func(obj)
        elif cached.changed():
            # The on-disk file moved under us; rebuild the cached object.
            cached.obj = self.func(obj)

        obj._filecache[self.name] = cached

        obj.__dict__[self.name] = cached.obj
        return cached.obj

    def __set__(self, obj, value):
        # Keep the invariant that anything in __dict__ also has a
        # corresponding _filecache entry, creating a stub entry on demand.
        try:
            entry = obj._filecache[self.name]
        except KeyError:
            entry = filecacheentry(self.join(obj, self.path), False)
            obj._filecache[self.name] = entry

        entry.obj = value  # update cached copy
        obj.__dict__[self.name] = value  # update copy returned by obj.x

    def __delete__(self, obj):
        # Mirror normal attribute-deletion semantics: raise AttributeError
        # (not KeyError) when the cached value is absent.
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
893
def finddirs(path):
    '''Yield every parent directory of a '/'-separated path, longest first.

    For 'a/b/c' this yields 'a/b' then 'a'; a path with no '/' yields
    nothing.'''
    pos = len(path)
    while True:
        pos = path.rfind('/', 0, pos)
        if pos == -1:
            break
        yield path[:pos]
General Comments 0
You need to be logged in to leave comments. Login now