dirstate: state that getcwd() shouldn't be used to get real file path...
Author: Yuya Nishihara
Changeset: r26293:3d24f31c (default branch)
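The new docstring below states that getcwd() is only for resolving file patterns and for converting canonical paths back into display paths, not for locating files on disk; real file access should go through the vfs layer. As a hedged illustration of that distinction (a sketch assuming a localrepo-style 'repo' object with the usual dirstate and wvfs attributes; none of this code is part of the changeset):

    def showandread(ui, repo, f):
        # presentation: turn the canonical name 'f' back into a path relative
        # to the user's cwd -- this is what getcwd()/pathto() are meant for
        ui.write('%s\n' % repo.dirstate.pathto(f))
        # real filesystem access: go through the working-directory vfs rather
        # than joining os.getcwd() with a relative path by hand
        return repo.wvfs.read(f)

pathto() falls back to getcwd() when no explicit cwd is given, so the displayed path stays meaningful even if the process cwd lies outside the repository.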
@@ -1,1034 +1,1040 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, osutil, parsers, encoding, pathutil
10 import scmutil, util, osutil, parsers, encoding, pathutil
11 import os, stat, errno
11 import os, stat, errno
12 import match as matchmod
12 import match as matchmod
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
18 dirstatetuple = parsers.dirstatetuple
18 dirstatetuple = parsers.dirstatetuple
19
19
20 class repocache(filecache):
20 class repocache(filecache):
21 """filecache for files in .hg/"""
21 """filecache for files in .hg/"""
22 def join(self, obj, fname):
22 def join(self, obj, fname):
23 return obj._opener.join(fname)
23 return obj._opener.join(fname)
24
24
25 class rootcache(filecache):
25 class rootcache(filecache):
26 """filecache for files in the repository root"""
26 """filecache for files in the repository root"""
27 def join(self, obj, fname):
27 def join(self, obj, fname):
28 return obj._join(fname)
28 return obj._join(fname)
29
29
30 class dirstate(object):
30 class dirstate(object):
31
31
32 def __init__(self, opener, ui, root, validate):
32 def __init__(self, opener, ui, root, validate):
33 '''Create a new dirstate object.
33 '''Create a new dirstate object.
34
34
35 opener is an open()-like callable that can be used to open the
35 opener is an open()-like callable that can be used to open the
36 dirstate file; root is the root of the directory tracked by
36 dirstate file; root is the root of the directory tracked by
37 the dirstate.
37 the dirstate.
38 '''
38 '''
39 self._opener = opener
39 self._opener = opener
40 self._validate = validate
40 self._validate = validate
41 self._root = root
41 self._root = root
42 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
42 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
43 # UNC path pointing to root share (issue4557)
43 # UNC path pointing to root share (issue4557)
44 self._rootdir = pathutil.normasprefix(root)
44 self._rootdir = pathutil.normasprefix(root)
45 self._dirty = False
45 self._dirty = False
46 self._dirtypl = False
46 self._dirtypl = False
47 self._lastnormaltime = 0
47 self._lastnormaltime = 0
48 self._ui = ui
48 self._ui = ui
49 self._filecache = {}
49 self._filecache = {}
50 self._parentwriters = 0
50 self._parentwriters = 0
51 self._filename = 'dirstate'
51 self._filename = 'dirstate'
52
52
53 def beginparentchange(self):
53 def beginparentchange(self):
54 '''Marks the beginning of a set of changes that involve changing
54 '''Marks the beginning of a set of changes that involve changing
55 the dirstate parents. If there is an exception during this time,
55 the dirstate parents. If there is an exception during this time,
56 the dirstate will not be written when the wlock is released. This
56 the dirstate will not be written when the wlock is released. This
57 prevents writing an incoherent dirstate where the parent doesn't
57 prevents writing an incoherent dirstate where the parent doesn't
58 match the contents.
58 match the contents.
59 '''
59 '''
60 self._parentwriters += 1
60 self._parentwriters += 1
61
61
62 def endparentchange(self):
62 def endparentchange(self):
63 '''Marks the end of a set of changes that involve changing the
63 '''Marks the end of a set of changes that involve changing the
64 dirstate parents. Once all parent changes have been marked done,
64 dirstate parents. Once all parent changes have been marked done,
65 the wlock will be free to write the dirstate on release.
65 the wlock will be free to write the dirstate on release.
66 '''
66 '''
67 if self._parentwriters > 0:
67 if self._parentwriters > 0:
68 self._parentwriters -= 1
68 self._parentwriters -= 1
69
69
70 def pendingparentchange(self):
70 def pendingparentchange(self):
71 '''Returns true if the dirstate is in the middle of a set of changes
71 '''Returns true if the dirstate is in the middle of a set of changes
72 that modify the dirstate parent.
72 that modify the dirstate parent.
73 '''
73 '''
74 return self._parentwriters > 0
74 return self._parentwriters > 0
75
75
76 @propertycache
76 @propertycache
77 def _map(self):
77 def _map(self):
78 '''Return the dirstate contents as a map from filename to
78 '''Return the dirstate contents as a map from filename to
79 (state, mode, size, time).'''
79 (state, mode, size, time).'''
80 self._read()
80 self._read()
81 return self._map
81 return self._map
82
82
83 @propertycache
83 @propertycache
84 def _copymap(self):
84 def _copymap(self):
85 self._read()
85 self._read()
86 return self._copymap
86 return self._copymap
87
87
88 @propertycache
88 @propertycache
89 def _filefoldmap(self):
89 def _filefoldmap(self):
90 try:
90 try:
91 makefilefoldmap = parsers.make_file_foldmap
91 makefilefoldmap = parsers.make_file_foldmap
92 except AttributeError:
92 except AttributeError:
93 pass
93 pass
94 else:
94 else:
95 return makefilefoldmap(self._map, util.normcasespec,
95 return makefilefoldmap(self._map, util.normcasespec,
96 util.normcasefallback)
96 util.normcasefallback)
97
97
98 f = {}
98 f = {}
99 normcase = util.normcase
99 normcase = util.normcase
100 for name, s in self._map.iteritems():
100 for name, s in self._map.iteritems():
101 if s[0] != 'r':
101 if s[0] != 'r':
102 f[normcase(name)] = name
102 f[normcase(name)] = name
103 f['.'] = '.' # prevents useless util.fspath() invocation
103 f['.'] = '.' # prevents useless util.fspath() invocation
104 return f
104 return f
105
105
106 @propertycache
106 @propertycache
107 def _dirfoldmap(self):
107 def _dirfoldmap(self):
108 f = {}
108 f = {}
109 normcase = util.normcase
109 normcase = util.normcase
110 for name in self._dirs:
110 for name in self._dirs:
111 f[normcase(name)] = name
111 f[normcase(name)] = name
112 return f
112 return f
113
113
114 @repocache('branch')
114 @repocache('branch')
115 def _branch(self):
115 def _branch(self):
116 try:
116 try:
117 return self._opener.read("branch").strip() or "default"
117 return self._opener.read("branch").strip() or "default"
118 except IOError as inst:
118 except IOError as inst:
119 if inst.errno != errno.ENOENT:
119 if inst.errno != errno.ENOENT:
120 raise
120 raise
121 return "default"
121 return "default"
122
122
123 @propertycache
123 @propertycache
124 def _pl(self):
124 def _pl(self):
125 try:
125 try:
126 fp = self._opener(self._filename)
126 fp = self._opener(self._filename)
127 st = fp.read(40)
127 st = fp.read(40)
128 fp.close()
128 fp.close()
129 l = len(st)
129 l = len(st)
130 if l == 40:
130 if l == 40:
131 return st[:20], st[20:40]
131 return st[:20], st[20:40]
132 elif l > 0 and l < 40:
132 elif l > 0 and l < 40:
133 raise util.Abort(_('working directory state appears damaged!'))
133 raise util.Abort(_('working directory state appears damaged!'))
134 except IOError as err:
134 except IOError as err:
135 if err.errno != errno.ENOENT:
135 if err.errno != errno.ENOENT:
136 raise
136 raise
137 return [nullid, nullid]
137 return [nullid, nullid]
138
138
139 @propertycache
139 @propertycache
140 def _dirs(self):
140 def _dirs(self):
141 return util.dirs(self._map, 'r')
141 return util.dirs(self._map, 'r')
142
142
143 def dirs(self):
143 def dirs(self):
144 return self._dirs
144 return self._dirs
145
145
146 @rootcache('.hgignore')
146 @rootcache('.hgignore')
147 def _ignore(self):
147 def _ignore(self):
148 files = []
148 files = []
149 if os.path.exists(self._join('.hgignore')):
149 if os.path.exists(self._join('.hgignore')):
150 files.append(self._join('.hgignore'))
150 files.append(self._join('.hgignore'))
151 for name, path in self._ui.configitems("ui"):
151 for name, path in self._ui.configitems("ui"):
152 if name == 'ignore' or name.startswith('ignore.'):
152 if name == 'ignore' or name.startswith('ignore.'):
153 # we need to use os.path.join here rather than self._join
153 # we need to use os.path.join here rather than self._join
154 # because path is arbitrary and user-specified
154 # because path is arbitrary and user-specified
155 files.append(os.path.join(self._rootdir, util.expandpath(path)))
155 files.append(os.path.join(self._rootdir, util.expandpath(path)))
156
156
157 if not files:
157 if not files:
158 return util.never
158 return util.never
159
159
160 pats = ['include:%s' % f for f in files]
160 pats = ['include:%s' % f for f in files]
161 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
161 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
162
162
163 @propertycache
163 @propertycache
164 def _slash(self):
164 def _slash(self):
165 return self._ui.configbool('ui', 'slash') and os.sep != '/'
165 return self._ui.configbool('ui', 'slash') and os.sep != '/'
166
166
167 @propertycache
167 @propertycache
168 def _checklink(self):
168 def _checklink(self):
169 return util.checklink(self._root)
169 return util.checklink(self._root)
170
170
171 @propertycache
171 @propertycache
172 def _checkexec(self):
172 def _checkexec(self):
173 return util.checkexec(self._root)
173 return util.checkexec(self._root)
174
174
175 @propertycache
175 @propertycache
176 def _checkcase(self):
176 def _checkcase(self):
177 return not util.checkcase(self._join('.hg'))
177 return not util.checkcase(self._join('.hg'))
178
178
179 def _join(self, f):
179 def _join(self, f):
180 # much faster than os.path.join()
180 # much faster than os.path.join()
181 # it's safe because f is always a relative path
181 # it's safe because f is always a relative path
182 return self._rootdir + f
182 return self._rootdir + f
183
183
184 def flagfunc(self, buildfallback):
184 def flagfunc(self, buildfallback):
185 if self._checklink and self._checkexec:
185 if self._checklink and self._checkexec:
186 def f(x):
186 def f(x):
187 try:
187 try:
188 st = os.lstat(self._join(x))
188 st = os.lstat(self._join(x))
189 if util.statislink(st):
189 if util.statislink(st):
190 return 'l'
190 return 'l'
191 if util.statisexec(st):
191 if util.statisexec(st):
192 return 'x'
192 return 'x'
193 except OSError:
193 except OSError:
194 pass
194 pass
195 return ''
195 return ''
196 return f
196 return f
197
197
198 fallback = buildfallback()
198 fallback = buildfallback()
199 if self._checklink:
199 if self._checklink:
200 def f(x):
200 def f(x):
201 if os.path.islink(self._join(x)):
201 if os.path.islink(self._join(x)):
202 return 'l'
202 return 'l'
203 if 'x' in fallback(x):
203 if 'x' in fallback(x):
204 return 'x'
204 return 'x'
205 return ''
205 return ''
206 return f
206 return f
207 if self._checkexec:
207 if self._checkexec:
208 def f(x):
208 def f(x):
209 if 'l' in fallback(x):
209 if 'l' in fallback(x):
210 return 'l'
210 return 'l'
211 if util.isexec(self._join(x)):
211 if util.isexec(self._join(x)):
212 return 'x'
212 return 'x'
213 return ''
213 return ''
214 return f
214 return f
215 else:
215 else:
216 return fallback
216 return fallback
217
217
218 @propertycache
218 @propertycache
219 def _cwd(self):
219 def _cwd(self):
220 return os.getcwd()
220 return os.getcwd()
221
221
222 def getcwd(self):
222 def getcwd(self):
223 '''Return the path from which a canonical path is calculated.
224
225 This path should be used to resolve file patterns or to convert
226 canonical paths back to file paths for display. It shouldn't be
227 used to get real file paths. Use vfs functions instead.
228 '''
223 cwd = self._cwd
229 cwd = self._cwd
224 if cwd == self._root:
230 if cwd == self._root:
225 return ''
231 return ''
226 # self._root ends with a path separator if self._root is '/' or 'C:\'
232 # self._root ends with a path separator if self._root is '/' or 'C:\'
227 rootsep = self._root
233 rootsep = self._root
228 if not util.endswithsep(rootsep):
234 if not util.endswithsep(rootsep):
229 rootsep += os.sep
235 rootsep += os.sep
230 if cwd.startswith(rootsep):
236 if cwd.startswith(rootsep):
231 return cwd[len(rootsep):]
237 return cwd[len(rootsep):]
232 else:
238 else:
233 # we're outside the repo. return an absolute path.
239 # we're outside the repo. return an absolute path.
234 return cwd
240 return cwd
235
241
236 def pathto(self, f, cwd=None):
242 def pathto(self, f, cwd=None):
237 if cwd is None:
243 if cwd is None:
238 cwd = self.getcwd()
244 cwd = self.getcwd()
239 path = util.pathto(self._root, cwd, f)
245 path = util.pathto(self._root, cwd, f)
240 if self._slash:
246 if self._slash:
241 return util.pconvert(path)
247 return util.pconvert(path)
242 return path
248 return path
243
249
244 def __getitem__(self, key):
250 def __getitem__(self, key):
245 '''Return the current state of key (a filename) in the dirstate.
251 '''Return the current state of key (a filename) in the dirstate.
246
252
247 States are:
253 States are:
248 n normal
254 n normal
249 m needs merging
255 m needs merging
250 r marked for removal
256 r marked for removal
251 a marked for addition
257 a marked for addition
252 ? not tracked
258 ? not tracked
253 '''
259 '''
254 return self._map.get(key, ("?",))[0]
260 return self._map.get(key, ("?",))[0]
255
261
256 def __contains__(self, key):
262 def __contains__(self, key):
257 return key in self._map
263 return key in self._map
258
264
259 def __iter__(self):
265 def __iter__(self):
260 for x in sorted(self._map):
266 for x in sorted(self._map):
261 yield x
267 yield x
262
268
263 def iteritems(self):
269 def iteritems(self):
264 return self._map.iteritems()
270 return self._map.iteritems()
265
271
266 def parents(self):
272 def parents(self):
267 return [self._validate(p) for p in self._pl]
273 return [self._validate(p) for p in self._pl]
268
274
269 def p1(self):
275 def p1(self):
270 return self._validate(self._pl[0])
276 return self._validate(self._pl[0])
271
277
272 def p2(self):
278 def p2(self):
273 return self._validate(self._pl[1])
279 return self._validate(self._pl[1])
274
280
275 def branch(self):
281 def branch(self):
276 return encoding.tolocal(self._branch)
282 return encoding.tolocal(self._branch)
277
283
278 def setparents(self, p1, p2=nullid):
284 def setparents(self, p1, p2=nullid):
279 """Set dirstate parents to p1 and p2.
285 """Set dirstate parents to p1 and p2.
280
286
281 When moving from two parents to one, 'm' merged entries are
287 When moving from two parents to one, 'm' merged entries are
282 adjusted to normal and previous copy records discarded and
288 adjusted to normal and previous copy records discarded and
283 returned by the call.
289 returned by the call.
284
290
285 See localrepo.setparents()
291 See localrepo.setparents()
286 """
292 """
287 if self._parentwriters == 0:
293 if self._parentwriters == 0:
288 raise ValueError("cannot set dirstate parent without "
294 raise ValueError("cannot set dirstate parent without "
289 "calling dirstate.beginparentchange")
295 "calling dirstate.beginparentchange")
290
296
291 self._dirty = self._dirtypl = True
297 self._dirty = self._dirtypl = True
292 oldp2 = self._pl[1]
298 oldp2 = self._pl[1]
293 self._pl = p1, p2
299 self._pl = p1, p2
294 copies = {}
300 copies = {}
295 if oldp2 != nullid and p2 == nullid:
301 if oldp2 != nullid and p2 == nullid:
296 for f, s in self._map.iteritems():
302 for f, s in self._map.iteritems():
297 # Discard 'm' markers when moving away from a merge state
303 # Discard 'm' markers when moving away from a merge state
298 if s[0] == 'm':
304 if s[0] == 'm':
299 if f in self._copymap:
305 if f in self._copymap:
300 copies[f] = self._copymap[f]
306 copies[f] = self._copymap[f]
301 self.normallookup(f)
307 self.normallookup(f)
302 # Also fix up otherparent markers
308 # Also fix up otherparent markers
303 elif s[0] == 'n' and s[2] == -2:
309 elif s[0] == 'n' and s[2] == -2:
304 if f in self._copymap:
310 if f in self._copymap:
305 copies[f] = self._copymap[f]
311 copies[f] = self._copymap[f]
306 self.add(f)
312 self.add(f)
307 return copies
313 return copies
308
314
309 def setbranch(self, branch):
315 def setbranch(self, branch):
310 self._branch = encoding.fromlocal(branch)
316 self._branch = encoding.fromlocal(branch)
311 f = self._opener('branch', 'w', atomictemp=True)
317 f = self._opener('branch', 'w', atomictemp=True)
312 try:
318 try:
313 f.write(self._branch + '\n')
319 f.write(self._branch + '\n')
314 f.close()
320 f.close()
315
321
316 # make sure filecache has the correct stat info for _branch after
322 # make sure filecache has the correct stat info for _branch after
317 # replacing the underlying file
323 # replacing the underlying file
318 ce = self._filecache['_branch']
324 ce = self._filecache['_branch']
319 if ce:
325 if ce:
320 ce.refresh()
326 ce.refresh()
321 except: # re-raises
327 except: # re-raises
322 f.discard()
328 f.discard()
323 raise
329 raise
324
330
325 def _read(self):
331 def _read(self):
326 self._map = {}
332 self._map = {}
327 self._copymap = {}
333 self._copymap = {}
328 try:
334 try:
329 fp = self._opener.open(self._filename)
335 fp = self._opener.open(self._filename)
330 try:
336 try:
331 st = fp.read()
337 st = fp.read()
332 finally:
338 finally:
333 fp.close()
339 fp.close()
334 except IOError as err:
340 except IOError as err:
335 if err.errno != errno.ENOENT:
341 if err.errno != errno.ENOENT:
336 raise
342 raise
337 return
343 return
338 if not st:
344 if not st:
339 return
345 return
340
346
341 if util.safehasattr(parsers, 'dict_new_presized'):
347 if util.safehasattr(parsers, 'dict_new_presized'):
342 # Make an estimate of the number of files in the dirstate based on
348 # Make an estimate of the number of files in the dirstate based on
343 # its size. From a linear regression on a set of real-world repos,
349 # its size. From a linear regression on a set of real-world repos,
344 # all over 10,000 files, the size of a dirstate entry is 85
350 # all over 10,000 files, the size of a dirstate entry is 85
345 # bytes. The cost of resizing is significantly higher than the cost
351 # bytes. The cost of resizing is significantly higher than the cost
346 # of filling in a larger presized dict, so subtract 20% from the
352 # of filling in a larger presized dict, so subtract 20% from the
347 # size.
353 # size.
348 #
354 #
349 # This heuristic is imperfect in many ways, so in a future dirstate
355 # This heuristic is imperfect in many ways, so in a future dirstate
350 # format update it makes sense to just record the number of entries
356 # format update it makes sense to just record the number of entries
351 # on write.
357 # on write.
352 self._map = parsers.dict_new_presized(len(st) / 71)
358 self._map = parsers.dict_new_presized(len(st) / 71)
353
359
354 # Python's garbage collector triggers a GC each time a certain number
360 # Python's garbage collector triggers a GC each time a certain number
355 # of container objects (the number being defined by
361 # of container objects (the number being defined by
356 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
362 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
357 # for each file in the dirstate. The C version then immediately marks
363 # for each file in the dirstate. The C version then immediately marks
358 # them as not to be tracked by the collector. However, this has no
364 # them as not to be tracked by the collector. However, this has no
359 # effect on when GCs are triggered, only on what objects the GC looks
365 # effect on when GCs are triggered, only on what objects the GC looks
360 # into. This means that O(number of files) GCs are unavoidable.
366 # into. This means that O(number of files) GCs are unavoidable.
361 # Depending on when in the process's lifetime the dirstate is parsed,
367 # Depending on when in the process's lifetime the dirstate is parsed,
362 # this can get very expensive. As a workaround, disable GC while
368 # this can get very expensive. As a workaround, disable GC while
363 # parsing the dirstate.
369 # parsing the dirstate.
364 #
370 #
365 # (we cannot decorate the function directly since it is in a C module)
371 # (we cannot decorate the function directly since it is in a C module)
366 parse_dirstate = util.nogc(parsers.parse_dirstate)
372 parse_dirstate = util.nogc(parsers.parse_dirstate)
367 p = parse_dirstate(self._map, self._copymap, st)
373 p = parse_dirstate(self._map, self._copymap, st)
368 if not self._dirtypl:
374 if not self._dirtypl:
369 self._pl = p
375 self._pl = p
370
376
371 def invalidate(self):
377 def invalidate(self):
372 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
378 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
373 "_pl", "_dirs", "_ignore"):
379 "_pl", "_dirs", "_ignore"):
374 if a in self.__dict__:
380 if a in self.__dict__:
375 delattr(self, a)
381 delattr(self, a)
376 self._lastnormaltime = 0
382 self._lastnormaltime = 0
377 self._dirty = False
383 self._dirty = False
378 self._parentwriters = 0
384 self._parentwriters = 0
379
385
380 def copy(self, source, dest):
386 def copy(self, source, dest):
381 """Mark dest as a copy of source. Unmark dest if source is None."""
387 """Mark dest as a copy of source. Unmark dest if source is None."""
382 if source == dest:
388 if source == dest:
383 return
389 return
384 self._dirty = True
390 self._dirty = True
385 if source is not None:
391 if source is not None:
386 self._copymap[dest] = source
392 self._copymap[dest] = source
387 elif dest in self._copymap:
393 elif dest in self._copymap:
388 del self._copymap[dest]
394 del self._copymap[dest]
389
395
390 def copied(self, file):
396 def copied(self, file):
391 return self._copymap.get(file, None)
397 return self._copymap.get(file, None)
392
398
393 def copies(self):
399 def copies(self):
394 return self._copymap
400 return self._copymap
395
401
396 def _droppath(self, f):
402 def _droppath(self, f):
397 if self[f] not in "?r" and "_dirs" in self.__dict__:
403 if self[f] not in "?r" and "_dirs" in self.__dict__:
398 self._dirs.delpath(f)
404 self._dirs.delpath(f)
399
405
400 def _addpath(self, f, state, mode, size, mtime):
406 def _addpath(self, f, state, mode, size, mtime):
401 oldstate = self[f]
407 oldstate = self[f]
402 if state == 'a' or oldstate == 'r':
408 if state == 'a' or oldstate == 'r':
403 scmutil.checkfilename(f)
409 scmutil.checkfilename(f)
404 if f in self._dirs:
410 if f in self._dirs:
405 raise util.Abort(_('directory %r already in dirstate') % f)
411 raise util.Abort(_('directory %r already in dirstate') % f)
406 # shadows
412 # shadows
407 for d in util.finddirs(f):
413 for d in util.finddirs(f):
408 if d in self._dirs:
414 if d in self._dirs:
409 break
415 break
410 if d in self._map and self[d] != 'r':
416 if d in self._map and self[d] != 'r':
411 raise util.Abort(
417 raise util.Abort(
412 _('file %r in dirstate clashes with %r') % (d, f))
418 _('file %r in dirstate clashes with %r') % (d, f))
413 if oldstate in "?r" and "_dirs" in self.__dict__:
419 if oldstate in "?r" and "_dirs" in self.__dict__:
414 self._dirs.addpath(f)
420 self._dirs.addpath(f)
415 self._dirty = True
421 self._dirty = True
416 self._map[f] = dirstatetuple(state, mode, size, mtime)
422 self._map[f] = dirstatetuple(state, mode, size, mtime)
417
423
418 def normal(self, f):
424 def normal(self, f):
419 '''Mark a file normal and clean.'''
425 '''Mark a file normal and clean.'''
420 s = os.lstat(self._join(f))
426 s = os.lstat(self._join(f))
421 mtime = int(s.st_mtime)
427 mtime = int(s.st_mtime)
422 self._addpath(f, 'n', s.st_mode,
428 self._addpath(f, 'n', s.st_mode,
423 s.st_size & _rangemask, mtime & _rangemask)
429 s.st_size & _rangemask, mtime & _rangemask)
424 if f in self._copymap:
430 if f in self._copymap:
425 del self._copymap[f]
431 del self._copymap[f]
426 if mtime > self._lastnormaltime:
432 if mtime > self._lastnormaltime:
427 # Remember the most recent modification timeslot for status(),
433 # Remember the most recent modification timeslot for status(),
428 # to make sure we won't miss future size-preserving file content
434 # to make sure we won't miss future size-preserving file content
429 # modifications that happen within the same timeslot.
435 # modifications that happen within the same timeslot.
430 self._lastnormaltime = mtime
436 self._lastnormaltime = mtime
431
437
432 def normallookup(self, f):
438 def normallookup(self, f):
433 '''Mark a file normal, but possibly dirty.'''
439 '''Mark a file normal, but possibly dirty.'''
434 if self._pl[1] != nullid and f in self._map:
440 if self._pl[1] != nullid and f in self._map:
435 # if there is a merge going on and the file was either
441 # if there is a merge going on and the file was either
436 # in state 'm' (-1) or coming from other parent (-2) before
442 # in state 'm' (-1) or coming from other parent (-2) before
437 # being removed, restore that state.
443 # being removed, restore that state.
438 entry = self._map[f]
444 entry = self._map[f]
439 if entry[0] == 'r' and entry[2] in (-1, -2):
445 if entry[0] == 'r' and entry[2] in (-1, -2):
440 source = self._copymap.get(f)
446 source = self._copymap.get(f)
441 if entry[2] == -1:
447 if entry[2] == -1:
442 self.merge(f)
448 self.merge(f)
443 elif entry[2] == -2:
449 elif entry[2] == -2:
444 self.otherparent(f)
450 self.otherparent(f)
445 if source:
451 if source:
446 self.copy(source, f)
452 self.copy(source, f)
447 return
453 return
448 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
454 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
449 return
455 return
450 self._addpath(f, 'n', 0, -1, -1)
456 self._addpath(f, 'n', 0, -1, -1)
451 if f in self._copymap:
457 if f in self._copymap:
452 del self._copymap[f]
458 del self._copymap[f]
453
459
454 def otherparent(self, f):
460 def otherparent(self, f):
455 '''Mark as coming from the other parent, always dirty.'''
461 '''Mark as coming from the other parent, always dirty.'''
456 if self._pl[1] == nullid:
462 if self._pl[1] == nullid:
457 raise util.Abort(_("setting %r to other parent "
463 raise util.Abort(_("setting %r to other parent "
458 "only allowed in merges") % f)
464 "only allowed in merges") % f)
459 if f in self and self[f] == 'n':
465 if f in self and self[f] == 'n':
460 # merge-like
466 # merge-like
461 self._addpath(f, 'm', 0, -2, -1)
467 self._addpath(f, 'm', 0, -2, -1)
462 else:
468 else:
463 # add-like
469 # add-like
464 self._addpath(f, 'n', 0, -2, -1)
470 self._addpath(f, 'n', 0, -2, -1)
465
471
466 if f in self._copymap:
472 if f in self._copymap:
467 del self._copymap[f]
473 del self._copymap[f]
468
474
469 def add(self, f):
475 def add(self, f):
470 '''Mark a file added.'''
476 '''Mark a file added.'''
471 self._addpath(f, 'a', 0, -1, -1)
477 self._addpath(f, 'a', 0, -1, -1)
472 if f in self._copymap:
478 if f in self._copymap:
473 del self._copymap[f]
479 del self._copymap[f]
474
480
475 def remove(self, f):
481 def remove(self, f):
476 '''Mark a file removed.'''
482 '''Mark a file removed.'''
477 self._dirty = True
483 self._dirty = True
478 self._droppath(f)
484 self._droppath(f)
479 size = 0
485 size = 0
480 if self._pl[1] != nullid and f in self._map:
486 if self._pl[1] != nullid and f in self._map:
481 # backup the previous state
487 # backup the previous state
482 entry = self._map[f]
488 entry = self._map[f]
483 if entry[0] == 'm': # merge
489 if entry[0] == 'm': # merge
484 size = -1
490 size = -1
485 elif entry[0] == 'n' and entry[2] == -2: # other parent
491 elif entry[0] == 'n' and entry[2] == -2: # other parent
486 size = -2
492 size = -2
487 self._map[f] = dirstatetuple('r', 0, size, 0)
493 self._map[f] = dirstatetuple('r', 0, size, 0)
488 if size == 0 and f in self._copymap:
494 if size == 0 and f in self._copymap:
489 del self._copymap[f]
495 del self._copymap[f]
490
496
491 def merge(self, f):
497 def merge(self, f):
492 '''Mark a file merged.'''
498 '''Mark a file merged.'''
493 if self._pl[1] == nullid:
499 if self._pl[1] == nullid:
494 return self.normallookup(f)
500 return self.normallookup(f)
495 return self.otherparent(f)
501 return self.otherparent(f)
496
502
497 def drop(self, f):
503 def drop(self, f):
498 '''Drop a file from the dirstate'''
504 '''Drop a file from the dirstate'''
499 if f in self._map:
505 if f in self._map:
500 self._dirty = True
506 self._dirty = True
501 self._droppath(f)
507 self._droppath(f)
502 del self._map[f]
508 del self._map[f]
503
509
504 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
510 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
505 if exists is None:
511 if exists is None:
506 exists = os.path.lexists(os.path.join(self._root, path))
512 exists = os.path.lexists(os.path.join(self._root, path))
507 if not exists:
513 if not exists:
508 # Maybe a path component exists
514 # Maybe a path component exists
509 if not ignoremissing and '/' in path:
515 if not ignoremissing and '/' in path:
510 d, f = path.rsplit('/', 1)
516 d, f = path.rsplit('/', 1)
511 d = self._normalize(d, False, ignoremissing, None)
517 d = self._normalize(d, False, ignoremissing, None)
512 folded = d + "/" + f
518 folded = d + "/" + f
513 else:
519 else:
514 # No path components, preserve original case
520 # No path components, preserve original case
515 folded = path
521 folded = path
516 else:
522 else:
517 # recursively normalize leading directory components
523 # recursively normalize leading directory components
518 # against dirstate
524 # against dirstate
519 if '/' in normed:
525 if '/' in normed:
520 d, f = normed.rsplit('/', 1)
526 d, f = normed.rsplit('/', 1)
521 d = self._normalize(d, False, ignoremissing, True)
527 d = self._normalize(d, False, ignoremissing, True)
522 r = self._root + "/" + d
528 r = self._root + "/" + d
523 folded = d + "/" + util.fspath(f, r)
529 folded = d + "/" + util.fspath(f, r)
524 else:
530 else:
525 folded = util.fspath(normed, self._root)
531 folded = util.fspath(normed, self._root)
526 storemap[normed] = folded
532 storemap[normed] = folded
527
533
528 return folded
534 return folded
529
535
530 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
536 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
531 normed = util.normcase(path)
537 normed = util.normcase(path)
532 folded = self._filefoldmap.get(normed, None)
538 folded = self._filefoldmap.get(normed, None)
533 if folded is None:
539 if folded is None:
534 if isknown:
540 if isknown:
535 folded = path
541 folded = path
536 else:
542 else:
537 folded = self._discoverpath(path, normed, ignoremissing, exists,
543 folded = self._discoverpath(path, normed, ignoremissing, exists,
538 self._filefoldmap)
544 self._filefoldmap)
539 return folded
545 return folded
540
546
541 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
547 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
542 normed = util.normcase(path)
548 normed = util.normcase(path)
543 folded = self._filefoldmap.get(normed, None)
549 folded = self._filefoldmap.get(normed, None)
544 if folded is None:
550 if folded is None:
545 folded = self._dirfoldmap.get(normed, None)
551 folded = self._dirfoldmap.get(normed, None)
546 if folded is None:
552 if folded is None:
547 if isknown:
553 if isknown:
548 folded = path
554 folded = path
549 else:
555 else:
550 # store discovered result in dirfoldmap so that future
556 # store discovered result in dirfoldmap so that future
551 # normalizefile calls don't start matching directories
557 # normalizefile calls don't start matching directories
552 folded = self._discoverpath(path, normed, ignoremissing, exists,
558 folded = self._discoverpath(path, normed, ignoremissing, exists,
553 self._dirfoldmap)
559 self._dirfoldmap)
554 return folded
560 return folded
555
561
556 def normalize(self, path, isknown=False, ignoremissing=False):
562 def normalize(self, path, isknown=False, ignoremissing=False):
557 '''
563 '''
558 normalize the case of a pathname when on a casefolding filesystem
564 normalize the case of a pathname when on a casefolding filesystem
559
565
560 isknown specifies whether the filename came from walking the
566 isknown specifies whether the filename came from walking the
561 disk, to avoid extra filesystem access.
567 disk, to avoid extra filesystem access.
562
568
563 If ignoremissing is True, missing paths are returned
569 If ignoremissing is True, missing paths are returned
564 unchanged. Otherwise, we try harder to normalize possibly
570 unchanged. Otherwise, we try harder to normalize possibly
565 existing path components.
571 existing path components.
566
572
567 The normalized case is determined based on the following precedence:
573 The normalized case is determined based on the following precedence:
568
574
569 - version of name already stored in the dirstate
575 - version of name already stored in the dirstate
570 - version of name stored on disk
576 - version of name stored on disk
571 - version provided via command arguments
577 - version provided via command arguments
572 '''
578 '''
573
579
574 if self._checkcase:
580 if self._checkcase:
575 return self._normalize(path, isknown, ignoremissing)
581 return self._normalize(path, isknown, ignoremissing)
576 return path
582 return path
577
583
578 def clear(self):
584 def clear(self):
579 self._map = {}
585 self._map = {}
580 if "_dirs" in self.__dict__:
586 if "_dirs" in self.__dict__:
581 delattr(self, "_dirs")
587 delattr(self, "_dirs")
582 self._copymap = {}
588 self._copymap = {}
583 self._pl = [nullid, nullid]
589 self._pl = [nullid, nullid]
584 self._lastnormaltime = 0
590 self._lastnormaltime = 0
585 self._dirty = True
591 self._dirty = True
586
592
587 def rebuild(self, parent, allfiles, changedfiles=None):
593 def rebuild(self, parent, allfiles, changedfiles=None):
588 if changedfiles is None:
594 if changedfiles is None:
589 changedfiles = allfiles
595 changedfiles = allfiles
590 oldmap = self._map
596 oldmap = self._map
591 self.clear()
597 self.clear()
592 for f in allfiles:
598 for f in allfiles:
593 if f not in changedfiles:
599 if f not in changedfiles:
594 self._map[f] = oldmap[f]
600 self._map[f] = oldmap[f]
595 else:
601 else:
596 if 'x' in allfiles.flags(f):
602 if 'x' in allfiles.flags(f):
597 self._map[f] = dirstatetuple('n', 0o777, -1, 0)
603 self._map[f] = dirstatetuple('n', 0o777, -1, 0)
598 else:
604 else:
599 self._map[f] = dirstatetuple('n', 0o666, -1, 0)
605 self._map[f] = dirstatetuple('n', 0o666, -1, 0)
600 self._pl = (parent, nullid)
606 self._pl = (parent, nullid)
601 self._dirty = True
607 self._dirty = True
602
608
603 def write(self):
609 def write(self):
604 if not self._dirty:
610 if not self._dirty:
605 return
611 return
606
612
607 # a long enough 'delaywrite' prevents 'pack_dirstate' from dropping
613 # a long enough 'delaywrite' prevents 'pack_dirstate' from dropping
608 # the timestamp of each entry in the dirstate, because of 'now > mtime'
614 # the timestamp of each entry in the dirstate, because of 'now > mtime'
609 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
615 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
610 if delaywrite > 0:
616 if delaywrite > 0:
611 import time # to avoid useless import
617 import time # to avoid useless import
612 time.sleep(delaywrite)
618 time.sleep(delaywrite)
613
619
614 st = self._opener(self._filename, "w", atomictemp=True)
620 st = self._opener(self._filename, "w", atomictemp=True)
615 # use the modification time of the newly created temporary file as the
621 # use the modification time of the newly created temporary file as the
616 # filesystem's notion of 'now'
622 # filesystem's notion of 'now'
617 now = util.fstat(st).st_mtime
623 now = util.fstat(st).st_mtime
618 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
624 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
619 st.close()
625 st.close()
620 self._lastnormaltime = 0
626 self._lastnormaltime = 0
621 self._dirty = self._dirtypl = False
627 self._dirty = self._dirtypl = False
622
628
623 def _dirignore(self, f):
629 def _dirignore(self, f):
624 if f == '.':
630 if f == '.':
625 return False
631 return False
626 if self._ignore(f):
632 if self._ignore(f):
627 return True
633 return True
628 for p in util.finddirs(f):
634 for p in util.finddirs(f):
629 if self._ignore(p):
635 if self._ignore(p):
630 return True
636 return True
631 return False
637 return False
632
638
633 def _walkexplicit(self, match, subrepos):
639 def _walkexplicit(self, match, subrepos):
634 '''Get stat data about the files explicitly specified by match.
640 '''Get stat data about the files explicitly specified by match.
635
641
636 Return a triple (results, dirsfound, dirsnotfound).
642 Return a triple (results, dirsfound, dirsnotfound).
637 - results is a mapping from filename to stat result. It also contains
643 - results is a mapping from filename to stat result. It also contains
638 listings mapping subrepos and .hg to None.
644 listings mapping subrepos and .hg to None.
639 - dirsfound is a list of files found to be directories.
645 - dirsfound is a list of files found to be directories.
640 - dirsnotfound is a list of files that the dirstate thinks are
646 - dirsnotfound is a list of files that the dirstate thinks are
641 directories and that were not found.'''
647 directories and that were not found.'''
642
648
643 def badtype(mode):
649 def badtype(mode):
644 kind = _('unknown')
650 kind = _('unknown')
645 if stat.S_ISCHR(mode):
651 if stat.S_ISCHR(mode):
646 kind = _('character device')
652 kind = _('character device')
647 elif stat.S_ISBLK(mode):
653 elif stat.S_ISBLK(mode):
648 kind = _('block device')
654 kind = _('block device')
649 elif stat.S_ISFIFO(mode):
655 elif stat.S_ISFIFO(mode):
650 kind = _('fifo')
656 kind = _('fifo')
651 elif stat.S_ISSOCK(mode):
657 elif stat.S_ISSOCK(mode):
652 kind = _('socket')
658 kind = _('socket')
653 elif stat.S_ISDIR(mode):
659 elif stat.S_ISDIR(mode):
654 kind = _('directory')
660 kind = _('directory')
655 return _('unsupported file type (type is %s)') % kind
661 return _('unsupported file type (type is %s)') % kind
656
662
657 matchedir = match.explicitdir
663 matchedir = match.explicitdir
658 badfn = match.bad
664 badfn = match.bad
659 dmap = self._map
665 dmap = self._map
660 lstat = os.lstat
666 lstat = os.lstat
661 getkind = stat.S_IFMT
667 getkind = stat.S_IFMT
662 dirkind = stat.S_IFDIR
668 dirkind = stat.S_IFDIR
663 regkind = stat.S_IFREG
669 regkind = stat.S_IFREG
664 lnkkind = stat.S_IFLNK
670 lnkkind = stat.S_IFLNK
665 join = self._join
671 join = self._join
666 dirsfound = []
672 dirsfound = []
667 foundadd = dirsfound.append
673 foundadd = dirsfound.append
668 dirsnotfound = []
674 dirsnotfound = []
669 notfoundadd = dirsnotfound.append
675 notfoundadd = dirsnotfound.append
670
676
671 if not match.isexact() and self._checkcase:
677 if not match.isexact() and self._checkcase:
672 normalize = self._normalize
678 normalize = self._normalize
673 else:
679 else:
674 normalize = None
680 normalize = None
675
681
676 files = sorted(match.files())
682 files = sorted(match.files())
677 subrepos.sort()
683 subrepos.sort()
678 i, j = 0, 0
684 i, j = 0, 0
679 while i < len(files) and j < len(subrepos):
685 while i < len(files) and j < len(subrepos):
680 subpath = subrepos[j] + "/"
686 subpath = subrepos[j] + "/"
681 if files[i] < subpath:
687 if files[i] < subpath:
682 i += 1
688 i += 1
683 continue
689 continue
684 while i < len(files) and files[i].startswith(subpath):
690 while i < len(files) and files[i].startswith(subpath):
685 del files[i]
691 del files[i]
686 j += 1
692 j += 1
687
693
688 if not files or '.' in files:
694 if not files or '.' in files:
689 files = ['.']
695 files = ['.']
690 results = dict.fromkeys(subrepos)
696 results = dict.fromkeys(subrepos)
691 results['.hg'] = None
697 results['.hg'] = None
692
698
693 alldirs = None
699 alldirs = None
694 for ff in files:
700 for ff in files:
695 # constructing the foldmap is expensive, so don't do it for the
701 # constructing the foldmap is expensive, so don't do it for the
696 # common case where files is ['.']
702 # common case where files is ['.']
697 if normalize and ff != '.':
703 if normalize and ff != '.':
698 nf = normalize(ff, False, True)
704 nf = normalize(ff, False, True)
699 else:
705 else:
700 nf = ff
706 nf = ff
701 if nf in results:
707 if nf in results:
702 continue
708 continue
703
709
704 try:
710 try:
705 st = lstat(join(nf))
711 st = lstat(join(nf))
706 kind = getkind(st.st_mode)
712 kind = getkind(st.st_mode)
707 if kind == dirkind:
713 if kind == dirkind:
708 if nf in dmap:
714 if nf in dmap:
709 # file replaced by dir on disk but still in dirstate
715 # file replaced by dir on disk but still in dirstate
710 results[nf] = None
716 results[nf] = None
711 if matchedir:
717 if matchedir:
712 matchedir(nf)
718 matchedir(nf)
713 foundadd((nf, ff))
719 foundadd((nf, ff))
714 elif kind == regkind or kind == lnkkind:
720 elif kind == regkind or kind == lnkkind:
715 results[nf] = st
721 results[nf] = st
716 else:
722 else:
717 badfn(ff, badtype(kind))
723 badfn(ff, badtype(kind))
718 if nf in dmap:
724 if nf in dmap:
719 results[nf] = None
725 results[nf] = None
720 except OSError as inst: # nf not found on disk - it is dirstate only
726 except OSError as inst: # nf not found on disk - it is dirstate only
721 if nf in dmap: # does it exactly match a missing file?
727 if nf in dmap: # does it exactly match a missing file?
722 results[nf] = None
728 results[nf] = None
723 else: # does it match a missing directory?
729 else: # does it match a missing directory?
724 if alldirs is None:
730 if alldirs is None:
725 alldirs = util.dirs(dmap)
731 alldirs = util.dirs(dmap)
726 if nf in alldirs:
732 if nf in alldirs:
727 if matchedir:
733 if matchedir:
728 matchedir(nf)
734 matchedir(nf)
729 notfoundadd(nf)
735 notfoundadd(nf)
730 else:
736 else:
731 badfn(ff, inst.strerror)
737 badfn(ff, inst.strerror)
732
738
733 # Case insensitive filesystems cannot rely on lstat() failing to detect
739 # Case insensitive filesystems cannot rely on lstat() failing to detect
734 # a case-only rename. Prune the stat object for any file that does not
740 # a case-only rename. Prune the stat object for any file that does not
735 # match the case in the filesystem, if there are multiple files that
741 # match the case in the filesystem, if there are multiple files that
736 # normalize to the same path.
742 # normalize to the same path.
737 if match.isexact() and self._checkcase:
743 if match.isexact() and self._checkcase:
738 normed = {}
744 normed = {}
739
745
740 for f, st in results.iteritems():
746 for f, st in results.iteritems():
741 if st is None:
747 if st is None:
742 continue
748 continue
743
749
744 nc = util.normcase(f)
750 nc = util.normcase(f)
745 paths = normed.get(nc)
751 paths = normed.get(nc)
746
752
747 if paths is None:
753 if paths is None:
748 paths = set()
754 paths = set()
749 normed[nc] = paths
755 normed[nc] = paths
750
756
751 paths.add(f)
757 paths.add(f)
752
758
753 for norm, paths in normed.iteritems():
759 for norm, paths in normed.iteritems():
754 if len(paths) > 1:
760 if len(paths) > 1:
755 for path in paths:
761 for path in paths:
756 folded = self._discoverpath(path, norm, True, None,
762 folded = self._discoverpath(path, norm, True, None,
757 self._dirfoldmap)
763 self._dirfoldmap)
758 if path != folded:
764 if path != folded:
759 results[path] = None
765 results[path] = None
760
766
761 return results, dirsfound, dirsnotfound
767 return results, dirsfound, dirsnotfound
762
768
763 def walk(self, match, subrepos, unknown, ignored, full=True):
769 def walk(self, match, subrepos, unknown, ignored, full=True):
764 '''
770 '''
765 Walk recursively through the directory tree, finding all files
771 Walk recursively through the directory tree, finding all files
766 matched by match.
772 matched by match.
767
773
768 If full is False, maybe skip some known-clean files.
774 If full is False, maybe skip some known-clean files.
769
775
770 Return a dict mapping filename to stat-like object (either
776 Return a dict mapping filename to stat-like object (either
771 mercurial.osutil.stat instance or return value of os.stat()).
777 mercurial.osutil.stat instance or return value of os.stat()).
772
778
773 '''
779 '''
774 # full is a flag that extensions that hook into walk can use -- this
780 # full is a flag that extensions that hook into walk can use -- this
775 # implementation doesn't use it at all. This satisfies the contract
781 # implementation doesn't use it at all. This satisfies the contract
776 # because we only guarantee a "maybe".
782 # because we only guarantee a "maybe".
777
783
778 if ignored:
784 if ignored:
779 ignore = util.never
785 ignore = util.never
780 dirignore = util.never
786 dirignore = util.never
781 elif unknown:
787 elif unknown:
782 ignore = self._ignore
788 ignore = self._ignore
783 dirignore = self._dirignore
789 dirignore = self._dirignore
784 else:
790 else:
785 # if not unknown and not ignored, drop dir recursion and step 2
791 # if not unknown and not ignored, drop dir recursion and step 2
786 ignore = util.always
792 ignore = util.always
787 dirignore = util.always
793 dirignore = util.always
788
794
789 matchfn = match.matchfn
795 matchfn = match.matchfn
790 matchalways = match.always()
796 matchalways = match.always()
791 matchtdir = match.traversedir
797 matchtdir = match.traversedir
792 dmap = self._map
798 dmap = self._map
793 listdir = osutil.listdir
799 listdir = osutil.listdir
794 lstat = os.lstat
800 lstat = os.lstat
795 dirkind = stat.S_IFDIR
801 dirkind = stat.S_IFDIR
796 regkind = stat.S_IFREG
802 regkind = stat.S_IFREG
797 lnkkind = stat.S_IFLNK
803 lnkkind = stat.S_IFLNK
798 join = self._join
804 join = self._join
799
805
800 exact = skipstep3 = False
806 exact = skipstep3 = False
801 if match.isexact(): # match.exact
807 if match.isexact(): # match.exact
802 exact = True
808 exact = True
803 dirignore = util.always # skip step 2
809 dirignore = util.always # skip step 2
804 elif match.prefix(): # match.match, no patterns
810 elif match.prefix(): # match.match, no patterns
805 skipstep3 = True
811 skipstep3 = True
806
812
807 if not exact and self._checkcase:
813 if not exact and self._checkcase:
808 normalize = self._normalize
814 normalize = self._normalize
809 normalizefile = self._normalizefile
815 normalizefile = self._normalizefile
810 skipstep3 = False
816 skipstep3 = False
811 else:
817 else:
812 normalize = self._normalize
818 normalize = self._normalize
813 normalizefile = None
819 normalizefile = None
814
820
815 # step 1: find all explicit files
821 # step 1: find all explicit files
816 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
822 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
817
823
818 skipstep3 = skipstep3 and not (work or dirsnotfound)
824 skipstep3 = skipstep3 and not (work or dirsnotfound)
819 work = [d for d in work if not dirignore(d[0])]
825 work = [d for d in work if not dirignore(d[0])]
820
826
821 # step 2: visit subdirectories
827 # step 2: visit subdirectories
822 def traverse(work, alreadynormed):
828 def traverse(work, alreadynormed):
823 wadd = work.append
829 wadd = work.append
824 while work:
830 while work:
825 nd = work.pop()
831 nd = work.pop()
826 skip = None
832 skip = None
827 if nd == '.':
833 if nd == '.':
828 nd = ''
834 nd = ''
829 else:
835 else:
830 skip = '.hg'
836 skip = '.hg'
831 try:
837 try:
832 entries = listdir(join(nd), stat=True, skip=skip)
838 entries = listdir(join(nd), stat=True, skip=skip)
833 except OSError as inst:
839 except OSError as inst:
834 if inst.errno in (errno.EACCES, errno.ENOENT):
840 if inst.errno in (errno.EACCES, errno.ENOENT):
835 match.bad(self.pathto(nd), inst.strerror)
841 match.bad(self.pathto(nd), inst.strerror)
836 continue
842 continue
837 raise
843 raise
838 for f, kind, st in entries:
844 for f, kind, st in entries:
839 if normalizefile:
845 if normalizefile:
840 # even though f might be a directory, we're only
846 # even though f might be a directory, we're only
841 # interested in comparing it to files currently in the
847 # interested in comparing it to files currently in the
842 # dmap -- therefore normalizefile is enough
848 # dmap -- therefore normalizefile is enough
843 nf = normalizefile(nd and (nd + "/" + f) or f, True,
849 nf = normalizefile(nd and (nd + "/" + f) or f, True,
844 True)
850 True)
845 else:
851 else:
846 nf = nd and (nd + "/" + f) or f
852 nf = nd and (nd + "/" + f) or f
847 if nf not in results:
853 if nf not in results:
848 if kind == dirkind:
854 if kind == dirkind:
849 if not ignore(nf):
855 if not ignore(nf):
850 if matchtdir:
856 if matchtdir:
851 matchtdir(nf)
857 matchtdir(nf)
852 wadd(nf)
858 wadd(nf)
853 if nf in dmap and (matchalways or matchfn(nf)):
859 if nf in dmap and (matchalways or matchfn(nf)):
854 results[nf] = None
860 results[nf] = None
855 elif kind == regkind or kind == lnkkind:
861 elif kind == regkind or kind == lnkkind:
856 if nf in dmap:
862 if nf in dmap:
857 if matchalways or matchfn(nf):
863 if matchalways or matchfn(nf):
858 results[nf] = st
864 results[nf] = st
859 elif ((matchalways or matchfn(nf))
865 elif ((matchalways or matchfn(nf))
860 and not ignore(nf)):
866 and not ignore(nf)):
861 # unknown file -- normalize if necessary
867 # unknown file -- normalize if necessary
862 if not alreadynormed:
868 if not alreadynormed:
863 nf = normalize(nf, False, True)
869 nf = normalize(nf, False, True)
864 results[nf] = st
870 results[nf] = st
865 elif nf in dmap and (matchalways or matchfn(nf)):
871 elif nf in dmap and (matchalways or matchfn(nf)):
866 results[nf] = None
872 results[nf] = None
867
873
868 for nd, d in work:
874 for nd, d in work:
869 # alreadynormed means that processwork doesn't have to do any
875 # alreadynormed means that processwork doesn't have to do any
870 # expensive directory normalization
876 # expensive directory normalization
871 alreadynormed = not normalize or nd == d
877 alreadynormed = not normalize or nd == d
872 traverse([d], alreadynormed)
878 traverse([d], alreadynormed)
873
879
874 for s in subrepos:
880 for s in subrepos:
875 del results[s]
881 del results[s]
876 del results['.hg']
882 del results['.hg']
877
883
878 # step 3: visit remaining files from dmap
884 # step 3: visit remaining files from dmap
879 if not skipstep3 and not exact:
885 if not skipstep3 and not exact:
880 # If a dmap file is not in results yet, it was either
886 # If a dmap file is not in results yet, it was either
881 # a) not matching matchfn b) ignored, c) missing, or d) under a
887 # a) not matching matchfn b) ignored, c) missing, or d) under a
882 # symlink directory.
888 # symlink directory.
883 if not results and matchalways:
889 if not results and matchalways:
884 visit = dmap.keys()
890 visit = dmap.keys()
885 else:
891 else:
886 visit = [f for f in dmap if f not in results and matchfn(f)]
892 visit = [f for f in dmap if f not in results and matchfn(f)]
887 visit.sort()
893 visit.sort()
888
894
889 if unknown:
895 if unknown:
890 # unknown == True means we walked all dirs under the roots
896 # unknown == True means we walked all dirs under the roots
891 # that weren't ignored, and everything that matched was stat'ed
897 # that weren't ignored, and everything that matched was stat'ed
892 # and is already in results.
898 # and is already in results.
893 # The rest must thus be ignored or under a symlink.
899 # The rest must thus be ignored or under a symlink.
894 audit_path = pathutil.pathauditor(self._root)
900 audit_path = pathutil.pathauditor(self._root)
895
901
896 for nf in iter(visit):
902 for nf in iter(visit):
897 # If a stat for the same file was already added with a
903 # If a stat for the same file was already added with a
898 # different case, don't add one for this, since that would
904 # different case, don't add one for this, since that would
899 # make it appear as if the file exists under both names
905 # make it appear as if the file exists under both names
900 # on disk.
906 # on disk.
901 if (normalizefile and
907 if (normalizefile and
902 normalizefile(nf, True, True) in results):
908 normalizefile(nf, True, True) in results):
903 results[nf] = None
909 results[nf] = None
904 # Report ignored items in the dmap as long as they are not
910 # Report ignored items in the dmap as long as they are not
905 # under a symlink directory.
911 # under a symlink directory.
906 elif audit_path.check(nf):
912 elif audit_path.check(nf):
907 try:
913 try:
908 results[nf] = lstat(join(nf))
914 results[nf] = lstat(join(nf))
909 # file was just ignored, no links, and exists
915 # file was just ignored, no links, and exists
910 except OSError:
916 except OSError:
911 # file doesn't exist
917 # file doesn't exist
912 results[nf] = None
918 results[nf] = None
913 else:
919 else:
914 # It's either missing or under a symlink directory
920 # It's either missing or under a symlink directory
915 # which we in this case report as missing
921 # which we in this case report as missing
916 results[nf] = None
922 results[nf] = None
917 else:
923 else:
918 # We may not have walked the full directory tree above,
924 # We may not have walked the full directory tree above,
919 # so stat and check everything we missed.
925 # so stat and check everything we missed.
920 nf = iter(visit).next
926 nf = iter(visit).next
921 for st in util.statfiles([join(i) for i in visit]):
927 for st in util.statfiles([join(i) for i in visit]):
922 results[nf()] = st
928 results[nf()] = st
923 return results
929 return results
924
930
925 def status(self, match, subrepos, ignored, clean, unknown):
931 def status(self, match, subrepos, ignored, clean, unknown):
926 '''Determine the status of the working copy relative to the
932 '''Determine the status of the working copy relative to the
927 dirstate and return a pair of (unsure, status), where status is of type
933 dirstate and return a pair of (unsure, status), where status is of type
928 scmutil.status and:
934 scmutil.status and:
929
935
930 unsure:
936 unsure:
931 files that might have been modified since the dirstate was
937 files that might have been modified since the dirstate was
932 written, but need to be read to be sure (size is the same
938 written, but need to be read to be sure (size is the same
933 but mtime differs)
939 but mtime differs)
934 status.modified:
940 status.modified:
935 files that have definitely been modified since the dirstate
941 files that have definitely been modified since the dirstate
936 was written (different size or mode)
942 was written (different size or mode)
937 status.clean:
943 status.clean:
938 files that have definitely not been modified since the
944 files that have definitely not been modified since the
939 dirstate was written
945 dirstate was written
940 '''
946 '''
941 listignored, listclean, listunknown = ignored, clean, unknown
947 listignored, listclean, listunknown = ignored, clean, unknown
942 lookup, modified, added, unknown, ignored = [], [], [], [], []
948 lookup, modified, added, unknown, ignored = [], [], [], [], []
943 removed, deleted, clean = [], [], []
949 removed, deleted, clean = [], [], []
944
950
945 dmap = self._map
951 dmap = self._map
946 ladd = lookup.append # aka "unsure"
952 ladd = lookup.append # aka "unsure"
947 madd = modified.append
953 madd = modified.append
948 aadd = added.append
954 aadd = added.append
949 uadd = unknown.append
955 uadd = unknown.append
950 iadd = ignored.append
956 iadd = ignored.append
951 radd = removed.append
957 radd = removed.append
952 dadd = deleted.append
958 dadd = deleted.append
953 cadd = clean.append
959 cadd = clean.append
954 mexact = match.exact
960 mexact = match.exact
955 dirignore = self._dirignore
961 dirignore = self._dirignore
956 checkexec = self._checkexec
962 checkexec = self._checkexec
957 copymap = self._copymap
963 copymap = self._copymap
958 lastnormaltime = self._lastnormaltime
964 lastnormaltime = self._lastnormaltime
959
965
960 # We need to do full walks when either
966 # We need to do full walks when either
961 # - we're listing all clean files, or
967 # - we're listing all clean files, or
962 # - match.traversedir does something, because match.traversedir should
968 # - match.traversedir does something, because match.traversedir should
963 # be called for every dir in the working dir
969 # be called for every dir in the working dir
964 full = listclean or match.traversedir is not None
970 full = listclean or match.traversedir is not None
965 for fn, st in self.walk(match, subrepos, listunknown, listignored,
971 for fn, st in self.walk(match, subrepos, listunknown, listignored,
966 full=full).iteritems():
972 full=full).iteritems():
967 if fn not in dmap:
973 if fn not in dmap:
968 if (listignored or mexact(fn)) and dirignore(fn):
974 if (listignored or mexact(fn)) and dirignore(fn):
969 if listignored:
975 if listignored:
970 iadd(fn)
976 iadd(fn)
971 else:
977 else:
972 uadd(fn)
978 uadd(fn)
973 continue
979 continue
974
980
975 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
981 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
976 # written like that for performance reasons. dmap[fn] is not a
982 # written like that for performance reasons. dmap[fn] is not a
977 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
983 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
978 # opcode has fast paths when the value to be unpacked is a tuple or
984 # opcode has fast paths when the value to be unpacked is a tuple or
979 # a list, but falls back to creating a full-fledged iterator in
985 # a list, but falls back to creating a full-fledged iterator in
980 # general. That is much slower than simply accessing and storing the
986 # general. That is much slower than simply accessing and storing the
981 # tuple members one by one.
987 # tuple members one by one.
982 t = dmap[fn]
988 t = dmap[fn]
983 state = t[0]
989 state = t[0]
984 mode = t[1]
990 mode = t[1]
985 size = t[2]
991 size = t[2]
986 time = t[3]
992 time = t[3]
987
993
988 if not st and state in "nma":
994 if not st and state in "nma":
989 dadd(fn)
995 dadd(fn)
990 elif state == 'n':
996 elif state == 'n':
991 mtime = int(st.st_mtime)
997 mtime = int(st.st_mtime)
992 if (size >= 0 and
998 if (size >= 0 and
993 ((size != st.st_size and size != st.st_size & _rangemask)
999 ((size != st.st_size and size != st.st_size & _rangemask)
994 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1000 or ((mode ^ st.st_mode) & 0o100 and checkexec))
995 or size == -2 # other parent
1001 or size == -2 # other parent
996 or fn in copymap):
1002 or fn in copymap):
997 madd(fn)
1003 madd(fn)
998 elif time != mtime and time != mtime & _rangemask:
1004 elif time != mtime and time != mtime & _rangemask:
999 ladd(fn)
1005 ladd(fn)
1000 elif mtime == lastnormaltime:
1006 elif mtime == lastnormaltime:
1001 # fn may have just been marked as normal and it may have
1007 # fn may have just been marked as normal and it may have
1002 # changed in the same second without changing its size.
1008 # changed in the same second without changing its size.
1003 # This can happen if we quickly do multiple commits.
1009 # This can happen if we quickly do multiple commits.
1004 # Force lookup, so we don't miss such a racy file change.
1010 # Force lookup, so we don't miss such a racy file change.
1005 ladd(fn)
1011 ladd(fn)
1006 elif listclean:
1012 elif listclean:
1007 cadd(fn)
1013 cadd(fn)
1008 elif state == 'm':
1014 elif state == 'm':
1009 madd(fn)
1015 madd(fn)
1010 elif state == 'a':
1016 elif state == 'a':
1011 aadd(fn)
1017 aadd(fn)
1012 elif state == 'r':
1018 elif state == 'r':
1013 radd(fn)
1019 radd(fn)
1014
1020
1015 return (lookup, scmutil.status(modified, added, removed, deleted,
1021 return (lookup, scmutil.status(modified, added, removed, deleted,
1016 unknown, ignored, clean))
1022 unknown, ignored, clean))
1017
1023
1018 def matches(self, match):
1024 def matches(self, match):
1019 '''
1025 '''
1020 return files in the dirstate (in whatever state) filtered by match
1026 return files in the dirstate (in whatever state) filtered by match
1021 '''
1027 '''
1022 dmap = self._map
1028 dmap = self._map
1023 if match.always():
1029 if match.always():
1024 return dmap.keys()
1030 return dmap.keys()
1025 files = match.files()
1031 files = match.files()
1026 if match.isexact():
1032 if match.isexact():
1027 # fast path -- filter the other way around, since typically files is
1033 # fast path -- filter the other way around, since typically files is
1028 # much smaller than dmap
1034 # much smaller than dmap
1029 return [f for f in files if f in dmap]
1035 return [f for f in files if f in dmap]
1030 if match.prefix() and all(fn in dmap for fn in files):
1036 if match.prefix() and all(fn in dmap for fn in files):
1031 # fast path -- all the values are known to be files, so just return
1037 # fast path -- all the values are known to be files, so just return
1032 # that
1038 # that
1033 return list(files)
1039 return list(files)
1034 return [f for f in dmap if match(f)]
1040 return [f for f in dmap if match(f)]
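For context on how the status() entry point defined above is typically consumed, here is a minimal sketch; the call signature follows the method shown in this file, while the matchmod.always() helper and the surrounding function are assumptions, not part of the changeset:

    from mercurial import match as matchmod

    def isdirty(repo):
        m = matchmod.always(repo.root, '')
        # status() returns (unsure, scmutil.status); 'unsure' files have the
        # recorded size but a different mtime and need a content comparison
        unsure, st = repo.dirstate.status(m, [], ignored=False, clean=False,
                                          unknown=True)
        return bool(unsure or st.modified or st.added or st.removed or
                    st.deleted)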