ignore: use 'include:' rules instead of custom syntax...
Durham Goode
changeset r25216:dc562165 (branch: default)
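The change below drops the custom .hgignore parser (mercurial/ignore.py) from dirstate: each ignore file is wrapped in an 'include:' pattern and handed to matchmod.match(), so ignore handling can go through the same pattern-reading code as other matchers. For orientation, this is the new dirstate._ignore as it appears in the first hunk; the comments are added here for explanation and are not part of the patch, and the snippet is an excerpt from the class, not standalone code:

    @rootcache('.hgignore')
    def _ignore(self):
        # collect the repo's .hgignore plus any ui.ignore / ui.ignore.* files
        files = []
        if os.path.exists(self._join('.hgignore')):
            files.append(self._join('.hgignore'))
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                files.append(os.path.join(self._rootdir, util.expandpath(path)))

        if not files:
            # nothing to ignore: util.never matches no path at all
            return util.never

        # previously: return ignore.ignore(self._root, files, self._ui.warn)
        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)

The visible cost is the new 'import match as matchmod' at the top of dirstate.py and the warn= keyword in the call; in exchange, dirstate no longer needs the separate ignore module at all.
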
@@ -1,981 +1,987 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
10 import scmutil, util, osutil, parsers, encoding, pathutil
11 import os, stat, errno
11 import os, stat, errno
12 import match as matchmod
12
13
13 propertycache = util.propertycache
14 propertycache = util.propertycache
14 filecache = scmutil.filecache
15 filecache = scmutil.filecache
15 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
16
17
17 dirstatetuple = parsers.dirstatetuple
18 dirstatetuple = parsers.dirstatetuple
18
19
19 class repocache(filecache):
20 class repocache(filecache):
20 """filecache for files in .hg/"""
21 """filecache for files in .hg/"""
21 def join(self, obj, fname):
22 def join(self, obj, fname):
22 return obj._opener.join(fname)
23 return obj._opener.join(fname)
23
24
24 class rootcache(filecache):
25 class rootcache(filecache):
25 """filecache for files in the repository root"""
26 """filecache for files in the repository root"""
26 def join(self, obj, fname):
27 def join(self, obj, fname):
27 return obj._join(fname)
28 return obj._join(fname)
28
29
29 class dirstate(object):
30 class dirstate(object):
30
31
31 def __init__(self, opener, ui, root, validate):
32 def __init__(self, opener, ui, root, validate):
32 '''Create a new dirstate object.
33 '''Create a new dirstate object.
33
34
34 opener is an open()-like callable that can be used to open the
35 opener is an open()-like callable that can be used to open the
35 dirstate file; root is the root of the directory tracked by
36 dirstate file; root is the root of the directory tracked by
36 the dirstate.
37 the dirstate.
37 '''
38 '''
38 self._opener = opener
39 self._opener = opener
39 self._validate = validate
40 self._validate = validate
40 self._root = root
41 self._root = root
41 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
42 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
42 # UNC path pointing to root share (issue4557)
43 # UNC path pointing to root share (issue4557)
43 self._rootdir = pathutil.normasprefix(root)
44 self._rootdir = pathutil.normasprefix(root)
44 self._dirty = False
45 self._dirty = False
45 self._dirtypl = False
46 self._dirtypl = False
46 self._lastnormaltime = 0
47 self._lastnormaltime = 0
47 self._ui = ui
48 self._ui = ui
48 self._filecache = {}
49 self._filecache = {}
49 self._parentwriters = 0
50 self._parentwriters = 0
50
51
51 def beginparentchange(self):
52 def beginparentchange(self):
52 '''Marks the beginning of a set of changes that involve changing
53 '''Marks the beginning of a set of changes that involve changing
53 the dirstate parents. If there is an exception during this time,
54 the dirstate parents. If there is an exception during this time,
54 the dirstate will not be written when the wlock is released. This
55 the dirstate will not be written when the wlock is released. This
55 prevents writing an incoherent dirstate where the parent doesn't
56 prevents writing an incoherent dirstate where the parent doesn't
56 match the contents.
57 match the contents.
57 '''
58 '''
58 self._parentwriters += 1
59 self._parentwriters += 1
59
60
60 def endparentchange(self):
61 def endparentchange(self):
61 '''Marks the end of a set of changes that involve changing the
62 '''Marks the end of a set of changes that involve changing the
62 dirstate parents. Once all parent changes have been marked done,
63 dirstate parents. Once all parent changes have been marked done,
63 the wlock will be free to write the dirstate on release.
64 the wlock will be free to write the dirstate on release.
64 '''
65 '''
65 if self._parentwriters > 0:
66 if self._parentwriters > 0:
66 self._parentwriters -= 1
67 self._parentwriters -= 1
67
68
68 def pendingparentchange(self):
69 def pendingparentchange(self):
69 '''Returns true if the dirstate is in the middle of a set of changes
70 '''Returns true if the dirstate is in the middle of a set of changes
70 that modify the dirstate parent.
71 that modify the dirstate parent.
71 '''
72 '''
72 return self._parentwriters > 0
73 return self._parentwriters > 0
73
74
74 @propertycache
75 @propertycache
75 def _map(self):
76 def _map(self):
76 '''Return the dirstate contents as a map from filename to
77 '''Return the dirstate contents as a map from filename to
77 (state, mode, size, time).'''
78 (state, mode, size, time).'''
78 self._read()
79 self._read()
79 return self._map
80 return self._map
80
81
81 @propertycache
82 @propertycache
82 def _copymap(self):
83 def _copymap(self):
83 self._read()
84 self._read()
84 return self._copymap
85 return self._copymap
85
86
86 @propertycache
87 @propertycache
87 def _filefoldmap(self):
88 def _filefoldmap(self):
88 try:
89 try:
89 makefilefoldmap = parsers.make_file_foldmap
90 makefilefoldmap = parsers.make_file_foldmap
90 except AttributeError:
91 except AttributeError:
91 pass
92 pass
92 else:
93 else:
93 return makefilefoldmap(self._map, util.normcasespec,
94 return makefilefoldmap(self._map, util.normcasespec,
94 util.normcasefallback)
95 util.normcasefallback)
95
96
96 f = {}
97 f = {}
97 normcase = util.normcase
98 normcase = util.normcase
98 for name, s in self._map.iteritems():
99 for name, s in self._map.iteritems():
99 if s[0] != 'r':
100 if s[0] != 'r':
100 f[normcase(name)] = name
101 f[normcase(name)] = name
101 f['.'] = '.' # prevents useless util.fspath() invocation
102 f['.'] = '.' # prevents useless util.fspath() invocation
102 return f
103 return f
103
104
104 @propertycache
105 @propertycache
105 def _dirfoldmap(self):
106 def _dirfoldmap(self):
106 f = {}
107 f = {}
107 normcase = util.normcase
108 normcase = util.normcase
108 for name in self._dirs:
109 for name in self._dirs:
109 f[normcase(name)] = name
110 f[normcase(name)] = name
110 return f
111 return f
111
112
112 @repocache('branch')
113 @repocache('branch')
113 def _branch(self):
114 def _branch(self):
114 try:
115 try:
115 return self._opener.read("branch").strip() or "default"
116 return self._opener.read("branch").strip() or "default"
116 except IOError, inst:
117 except IOError, inst:
117 if inst.errno != errno.ENOENT:
118 if inst.errno != errno.ENOENT:
118 raise
119 raise
119 return "default"
120 return "default"
120
121
121 @propertycache
122 @propertycache
122 def _pl(self):
123 def _pl(self):
123 try:
124 try:
124 fp = self._opener("dirstate")
125 fp = self._opener("dirstate")
125 st = fp.read(40)
126 st = fp.read(40)
126 fp.close()
127 fp.close()
127 l = len(st)
128 l = len(st)
128 if l == 40:
129 if l == 40:
129 return st[:20], st[20:40]
130 return st[:20], st[20:40]
130 elif l > 0 and l < 40:
131 elif l > 0 and l < 40:
131 raise util.Abort(_('working directory state appears damaged!'))
132 raise util.Abort(_('working directory state appears damaged!'))
132 except IOError, err:
133 except IOError, err:
133 if err.errno != errno.ENOENT:
134 if err.errno != errno.ENOENT:
134 raise
135 raise
135 return [nullid, nullid]
136 return [nullid, nullid]
136
137
137 @propertycache
138 @propertycache
138 def _dirs(self):
139 def _dirs(self):
139 return util.dirs(self._map, 'r')
140 return util.dirs(self._map, 'r')
140
141
141 def dirs(self):
142 def dirs(self):
142 return self._dirs
143 return self._dirs
143
144
144 @rootcache('.hgignore')
145 @rootcache('.hgignore')
145 def _ignore(self):
146 def _ignore(self):
146 files = []
147 files = []
147 if os.path.exists(self._join('.hgignore')):
148 if os.path.exists(self._join('.hgignore')):
148 files.append(self._join('.hgignore'))
149 files.append(self._join('.hgignore'))
149 for name, path in self._ui.configitems("ui"):
150 for name, path in self._ui.configitems("ui"):
150 if name == 'ignore' or name.startswith('ignore.'):
151 if name == 'ignore' or name.startswith('ignore.'):
151 # we need to use os.path.join here rather than self._join
152 # we need to use os.path.join here rather than self._join
152 # because path is arbitrary and user-specified
153 # because path is arbitrary and user-specified
153 files.append(os.path.join(self._rootdir, util.expandpath(path)))
154 files.append(os.path.join(self._rootdir, util.expandpath(path)))
154 return ignore.ignore(self._root, files, self._ui.warn)
155
156 if not files:
157 return util.never
158
159 pats = ['include:%s' % f for f in files]
160 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
155
161
156 @propertycache
162 @propertycache
157 def _slash(self):
163 def _slash(self):
158 return self._ui.configbool('ui', 'slash') and os.sep != '/'
164 return self._ui.configbool('ui', 'slash') and os.sep != '/'
159
165
160 @propertycache
166 @propertycache
161 def _checklink(self):
167 def _checklink(self):
162 return util.checklink(self._root)
168 return util.checklink(self._root)
163
169
164 @propertycache
170 @propertycache
165 def _checkexec(self):
171 def _checkexec(self):
166 return util.checkexec(self._root)
172 return util.checkexec(self._root)
167
173
168 @propertycache
174 @propertycache
169 def _checkcase(self):
175 def _checkcase(self):
170 return not util.checkcase(self._join('.hg'))
176 return not util.checkcase(self._join('.hg'))
171
177
172 def _join(self, f):
178 def _join(self, f):
173 # much faster than os.path.join()
179 # much faster than os.path.join()
174 # it's safe because f is always a relative path
180 # it's safe because f is always a relative path
175 return self._rootdir + f
181 return self._rootdir + f
176
182
177 def flagfunc(self, buildfallback):
183 def flagfunc(self, buildfallback):
178 if self._checklink and self._checkexec:
184 if self._checklink and self._checkexec:
179 def f(x):
185 def f(x):
180 try:
186 try:
181 st = os.lstat(self._join(x))
187 st = os.lstat(self._join(x))
182 if util.statislink(st):
188 if util.statislink(st):
183 return 'l'
189 return 'l'
184 if util.statisexec(st):
190 if util.statisexec(st):
185 return 'x'
191 return 'x'
186 except OSError:
192 except OSError:
187 pass
193 pass
188 return ''
194 return ''
189 return f
195 return f
190
196
191 fallback = buildfallback()
197 fallback = buildfallback()
192 if self._checklink:
198 if self._checklink:
193 def f(x):
199 def f(x):
194 if os.path.islink(self._join(x)):
200 if os.path.islink(self._join(x)):
195 return 'l'
201 return 'l'
196 if 'x' in fallback(x):
202 if 'x' in fallback(x):
197 return 'x'
203 return 'x'
198 return ''
204 return ''
199 return f
205 return f
200 if self._checkexec:
206 if self._checkexec:
201 def f(x):
207 def f(x):
202 if 'l' in fallback(x):
208 if 'l' in fallback(x):
203 return 'l'
209 return 'l'
204 if util.isexec(self._join(x)):
210 if util.isexec(self._join(x)):
205 return 'x'
211 return 'x'
206 return ''
212 return ''
207 return f
213 return f
208 else:
214 else:
209 return fallback
215 return fallback
210
216
211 @propertycache
217 @propertycache
212 def _cwd(self):
218 def _cwd(self):
213 return os.getcwd()
219 return os.getcwd()
214
220
215 def getcwd(self):
221 def getcwd(self):
216 cwd = self._cwd
222 cwd = self._cwd
217 if cwd == self._root:
223 if cwd == self._root:
218 return ''
224 return ''
219 # self._root ends with a path separator if self._root is '/' or 'C:\'
225 # self._root ends with a path separator if self._root is '/' or 'C:\'
220 rootsep = self._root
226 rootsep = self._root
221 if not util.endswithsep(rootsep):
227 if not util.endswithsep(rootsep):
222 rootsep += os.sep
228 rootsep += os.sep
223 if cwd.startswith(rootsep):
229 if cwd.startswith(rootsep):
224 return cwd[len(rootsep):]
230 return cwd[len(rootsep):]
225 else:
231 else:
226 # we're outside the repo. return an absolute path.
232 # we're outside the repo. return an absolute path.
227 return cwd
233 return cwd
228
234
229 def pathto(self, f, cwd=None):
235 def pathto(self, f, cwd=None):
230 if cwd is None:
236 if cwd is None:
231 cwd = self.getcwd()
237 cwd = self.getcwd()
232 path = util.pathto(self._root, cwd, f)
238 path = util.pathto(self._root, cwd, f)
233 if self._slash:
239 if self._slash:
234 return util.pconvert(path)
240 return util.pconvert(path)
235 return path
241 return path
236
242
237 def __getitem__(self, key):
243 def __getitem__(self, key):
238 '''Return the current state of key (a filename) in the dirstate.
244 '''Return the current state of key (a filename) in the dirstate.
239
245
240 States are:
246 States are:
241 n normal
247 n normal
242 m needs merging
248 m needs merging
243 r marked for removal
249 r marked for removal
244 a marked for addition
250 a marked for addition
245 ? not tracked
251 ? not tracked
246 '''
252 '''
247 return self._map.get(key, ("?",))[0]
253 return self._map.get(key, ("?",))[0]
248
254
249 def __contains__(self, key):
255 def __contains__(self, key):
250 return key in self._map
256 return key in self._map
251
257
252 def __iter__(self):
258 def __iter__(self):
253 for x in sorted(self._map):
259 for x in sorted(self._map):
254 yield x
260 yield x
255
261
256 def iteritems(self):
262 def iteritems(self):
257 return self._map.iteritems()
263 return self._map.iteritems()
258
264
259 def parents(self):
265 def parents(self):
260 return [self._validate(p) for p in self._pl]
266 return [self._validate(p) for p in self._pl]
261
267
262 def p1(self):
268 def p1(self):
263 return self._validate(self._pl[0])
269 return self._validate(self._pl[0])
264
270
265 def p2(self):
271 def p2(self):
266 return self._validate(self._pl[1])
272 return self._validate(self._pl[1])
267
273
268 def branch(self):
274 def branch(self):
269 return encoding.tolocal(self._branch)
275 return encoding.tolocal(self._branch)
270
276
271 def setparents(self, p1, p2=nullid):
277 def setparents(self, p1, p2=nullid):
272 """Set dirstate parents to p1 and p2.
278 """Set dirstate parents to p1 and p2.
273
279
274 When moving from two parents to one, 'm' merged entries are
280 When moving from two parents to one, 'm' merged entries are
275 adjusted to normal and previous copy records discarded and
281 adjusted to normal and previous copy records discarded and
276 returned by the call.
282 returned by the call.
277
283
278 See localrepo.setparents()
284 See localrepo.setparents()
279 """
285 """
280 if self._parentwriters == 0:
286 if self._parentwriters == 0:
281 raise ValueError("cannot set dirstate parent without "
287 raise ValueError("cannot set dirstate parent without "
282 "calling dirstate.beginparentchange")
288 "calling dirstate.beginparentchange")
283
289
284 self._dirty = self._dirtypl = True
290 self._dirty = self._dirtypl = True
285 oldp2 = self._pl[1]
291 oldp2 = self._pl[1]
286 self._pl = p1, p2
292 self._pl = p1, p2
287 copies = {}
293 copies = {}
288 if oldp2 != nullid and p2 == nullid:
294 if oldp2 != nullid and p2 == nullid:
289 for f, s in self._map.iteritems():
295 for f, s in self._map.iteritems():
290 # Discard 'm' markers when moving away from a merge state
296 # Discard 'm' markers when moving away from a merge state
291 if s[0] == 'm':
297 if s[0] == 'm':
292 if f in self._copymap:
298 if f in self._copymap:
293 copies[f] = self._copymap[f]
299 copies[f] = self._copymap[f]
294 self.normallookup(f)
300 self.normallookup(f)
295 # Also fix up otherparent markers
301 # Also fix up otherparent markers
296 elif s[0] == 'n' and s[2] == -2:
302 elif s[0] == 'n' and s[2] == -2:
297 if f in self._copymap:
303 if f in self._copymap:
298 copies[f] = self._copymap[f]
304 copies[f] = self._copymap[f]
299 self.add(f)
305 self.add(f)
300 return copies
306 return copies
301
307
302 def setbranch(self, branch):
308 def setbranch(self, branch):
303 self._branch = encoding.fromlocal(branch)
309 self._branch = encoding.fromlocal(branch)
304 f = self._opener('branch', 'w', atomictemp=True)
310 f = self._opener('branch', 'w', atomictemp=True)
305 try:
311 try:
306 f.write(self._branch + '\n')
312 f.write(self._branch + '\n')
307 f.close()
313 f.close()
308
314
309 # make sure filecache has the correct stat info for _branch after
315 # make sure filecache has the correct stat info for _branch after
310 # replacing the underlying file
316 # replacing the underlying file
311 ce = self._filecache['_branch']
317 ce = self._filecache['_branch']
312 if ce:
318 if ce:
313 ce.refresh()
319 ce.refresh()
314 except: # re-raises
320 except: # re-raises
315 f.discard()
321 f.discard()
316 raise
322 raise
317
323
318 def _read(self):
324 def _read(self):
319 self._map = {}
325 self._map = {}
320 self._copymap = {}
326 self._copymap = {}
321 try:
327 try:
322 st = self._opener.read("dirstate")
328 st = self._opener.read("dirstate")
323 except IOError, err:
329 except IOError, err:
324 if err.errno != errno.ENOENT:
330 if err.errno != errno.ENOENT:
325 raise
331 raise
326 return
332 return
327 if not st:
333 if not st:
328 return
334 return
329
335
330 # Python's garbage collector triggers a GC each time a certain number
336 # Python's garbage collector triggers a GC each time a certain number
331 # of container objects (the number being defined by
337 # of container objects (the number being defined by
332 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
338 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
333 # for each file in the dirstate. The C version then immediately marks
339 # for each file in the dirstate. The C version then immediately marks
334 # them as not to be tracked by the collector. However, this has no
340 # them as not to be tracked by the collector. However, this has no
335 # effect on when GCs are triggered, only on what objects the GC looks
341 # effect on when GCs are triggered, only on what objects the GC looks
336 # into. This means that O(number of files) GCs are unavoidable.
342 # into. This means that O(number of files) GCs are unavoidable.
337 # Depending on when in the process's lifetime the dirstate is parsed,
343 # Depending on when in the process's lifetime the dirstate is parsed,
338 # this can get very expensive. As a workaround, disable GC while
344 # this can get very expensive. As a workaround, disable GC while
339 # parsing the dirstate.
345 # parsing the dirstate.
340 #
346 #
341 # (we cannot decorate the function directly since it is in a C module)
347 # (we cannot decorate the function directly since it is in a C module)
342 parse_dirstate = util.nogc(parsers.parse_dirstate)
348 parse_dirstate = util.nogc(parsers.parse_dirstate)
343 p = parse_dirstate(self._map, self._copymap, st)
349 p = parse_dirstate(self._map, self._copymap, st)
344 if not self._dirtypl:
350 if not self._dirtypl:
345 self._pl = p
351 self._pl = p
346
352
347 def invalidate(self):
353 def invalidate(self):
348 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
354 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
349 "_pl", "_dirs", "_ignore"):
355 "_pl", "_dirs", "_ignore"):
350 if a in self.__dict__:
356 if a in self.__dict__:
351 delattr(self, a)
357 delattr(self, a)
352 self._lastnormaltime = 0
358 self._lastnormaltime = 0
353 self._dirty = False
359 self._dirty = False
354 self._parentwriters = 0
360 self._parentwriters = 0
355
361
356 def copy(self, source, dest):
362 def copy(self, source, dest):
357 """Mark dest as a copy of source. Unmark dest if source is None."""
363 """Mark dest as a copy of source. Unmark dest if source is None."""
358 if source == dest:
364 if source == dest:
359 return
365 return
360 self._dirty = True
366 self._dirty = True
361 if source is not None:
367 if source is not None:
362 self._copymap[dest] = source
368 self._copymap[dest] = source
363 elif dest in self._copymap:
369 elif dest in self._copymap:
364 del self._copymap[dest]
370 del self._copymap[dest]
365
371
366 def copied(self, file):
372 def copied(self, file):
367 return self._copymap.get(file, None)
373 return self._copymap.get(file, None)
368
374
369 def copies(self):
375 def copies(self):
370 return self._copymap
376 return self._copymap
371
377
372 def _droppath(self, f):
378 def _droppath(self, f):
373 if self[f] not in "?r" and "_dirs" in self.__dict__:
379 if self[f] not in "?r" and "_dirs" in self.__dict__:
374 self._dirs.delpath(f)
380 self._dirs.delpath(f)
375
381
376 def _addpath(self, f, state, mode, size, mtime):
382 def _addpath(self, f, state, mode, size, mtime):
377 oldstate = self[f]
383 oldstate = self[f]
378 if state == 'a' or oldstate == 'r':
384 if state == 'a' or oldstate == 'r':
379 scmutil.checkfilename(f)
385 scmutil.checkfilename(f)
380 if f in self._dirs:
386 if f in self._dirs:
381 raise util.Abort(_('directory %r already in dirstate') % f)
387 raise util.Abort(_('directory %r already in dirstate') % f)
382 # shadows
388 # shadows
383 for d in util.finddirs(f):
389 for d in util.finddirs(f):
384 if d in self._dirs:
390 if d in self._dirs:
385 break
391 break
386 if d in self._map and self[d] != 'r':
392 if d in self._map and self[d] != 'r':
387 raise util.Abort(
393 raise util.Abort(
388 _('file %r in dirstate clashes with %r') % (d, f))
394 _('file %r in dirstate clashes with %r') % (d, f))
389 if oldstate in "?r" and "_dirs" in self.__dict__:
395 if oldstate in "?r" and "_dirs" in self.__dict__:
390 self._dirs.addpath(f)
396 self._dirs.addpath(f)
391 self._dirty = True
397 self._dirty = True
392 self._map[f] = dirstatetuple(state, mode, size, mtime)
398 self._map[f] = dirstatetuple(state, mode, size, mtime)
393
399
394 def normal(self, f):
400 def normal(self, f):
395 '''Mark a file normal and clean.'''
401 '''Mark a file normal and clean.'''
396 s = os.lstat(self._join(f))
402 s = os.lstat(self._join(f))
397 mtime = int(s.st_mtime)
403 mtime = int(s.st_mtime)
398 self._addpath(f, 'n', s.st_mode,
404 self._addpath(f, 'n', s.st_mode,
399 s.st_size & _rangemask, mtime & _rangemask)
405 s.st_size & _rangemask, mtime & _rangemask)
400 if f in self._copymap:
406 if f in self._copymap:
401 del self._copymap[f]
407 del self._copymap[f]
402 if mtime > self._lastnormaltime:
408 if mtime > self._lastnormaltime:
403 # Remember the most recent modification timeslot for status(),
409 # Remember the most recent modification timeslot for status(),
404 # to make sure we won't miss future size-preserving file content
410 # to make sure we won't miss future size-preserving file content
405 # modifications that happen within the same timeslot.
411 # modifications that happen within the same timeslot.
406 self._lastnormaltime = mtime
412 self._lastnormaltime = mtime
407
413
408 def normallookup(self, f):
414 def normallookup(self, f):
409 '''Mark a file normal, but possibly dirty.'''
415 '''Mark a file normal, but possibly dirty.'''
410 if self._pl[1] != nullid and f in self._map:
416 if self._pl[1] != nullid and f in self._map:
411 # if there is a merge going on and the file was either
417 # if there is a merge going on and the file was either
412 # in state 'm' (-1) or coming from other parent (-2) before
418 # in state 'm' (-1) or coming from other parent (-2) before
413 # being removed, restore that state.
419 # being removed, restore that state.
414 entry = self._map[f]
420 entry = self._map[f]
415 if entry[0] == 'r' and entry[2] in (-1, -2):
421 if entry[0] == 'r' and entry[2] in (-1, -2):
416 source = self._copymap.get(f)
422 source = self._copymap.get(f)
417 if entry[2] == -1:
423 if entry[2] == -1:
418 self.merge(f)
424 self.merge(f)
419 elif entry[2] == -2:
425 elif entry[2] == -2:
420 self.otherparent(f)
426 self.otherparent(f)
421 if source:
427 if source:
422 self.copy(source, f)
428 self.copy(source, f)
423 return
429 return
424 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
430 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
425 return
431 return
426 self._addpath(f, 'n', 0, -1, -1)
432 self._addpath(f, 'n', 0, -1, -1)
427 if f in self._copymap:
433 if f in self._copymap:
428 del self._copymap[f]
434 del self._copymap[f]
429
435
430 def otherparent(self, f):
436 def otherparent(self, f):
431 '''Mark as coming from the other parent, always dirty.'''
437 '''Mark as coming from the other parent, always dirty.'''
432 if self._pl[1] == nullid:
438 if self._pl[1] == nullid:
433 raise util.Abort(_("setting %r to other parent "
439 raise util.Abort(_("setting %r to other parent "
434 "only allowed in merges") % f)
440 "only allowed in merges") % f)
435 if f in self and self[f] == 'n':
441 if f in self and self[f] == 'n':
436 # merge-like
442 # merge-like
437 self._addpath(f, 'm', 0, -2, -1)
443 self._addpath(f, 'm', 0, -2, -1)
438 else:
444 else:
439 # add-like
445 # add-like
440 self._addpath(f, 'n', 0, -2, -1)
446 self._addpath(f, 'n', 0, -2, -1)
441
447
442 if f in self._copymap:
448 if f in self._copymap:
443 del self._copymap[f]
449 del self._copymap[f]
444
450
445 def add(self, f):
451 def add(self, f):
446 '''Mark a file added.'''
452 '''Mark a file added.'''
447 self._addpath(f, 'a', 0, -1, -1)
453 self._addpath(f, 'a', 0, -1, -1)
448 if f in self._copymap:
454 if f in self._copymap:
449 del self._copymap[f]
455 del self._copymap[f]
450
456
451 def remove(self, f):
457 def remove(self, f):
452 '''Mark a file removed.'''
458 '''Mark a file removed.'''
453 self._dirty = True
459 self._dirty = True
454 self._droppath(f)
460 self._droppath(f)
455 size = 0
461 size = 0
456 if self._pl[1] != nullid and f in self._map:
462 if self._pl[1] != nullid and f in self._map:
457 # backup the previous state
463 # backup the previous state
458 entry = self._map[f]
464 entry = self._map[f]
459 if entry[0] == 'm': # merge
465 if entry[0] == 'm': # merge
460 size = -1
466 size = -1
461 elif entry[0] == 'n' and entry[2] == -2: # other parent
467 elif entry[0] == 'n' and entry[2] == -2: # other parent
462 size = -2
468 size = -2
463 self._map[f] = dirstatetuple('r', 0, size, 0)
469 self._map[f] = dirstatetuple('r', 0, size, 0)
464 if size == 0 and f in self._copymap:
470 if size == 0 and f in self._copymap:
465 del self._copymap[f]
471 del self._copymap[f]
466
472
467 def merge(self, f):
473 def merge(self, f):
468 '''Mark a file merged.'''
474 '''Mark a file merged.'''
469 if self._pl[1] == nullid:
475 if self._pl[1] == nullid:
470 return self.normallookup(f)
476 return self.normallookup(f)
471 return self.otherparent(f)
477 return self.otherparent(f)
472
478
473 def drop(self, f):
479 def drop(self, f):
474 '''Drop a file from the dirstate'''
480 '''Drop a file from the dirstate'''
475 if f in self._map:
481 if f in self._map:
476 self._dirty = True
482 self._dirty = True
477 self._droppath(f)
483 self._droppath(f)
478 del self._map[f]
484 del self._map[f]
479
485
480 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
486 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
481 if exists is None:
487 if exists is None:
482 exists = os.path.lexists(os.path.join(self._root, path))
488 exists = os.path.lexists(os.path.join(self._root, path))
483 if not exists:
489 if not exists:
484 # Maybe a path component exists
490 # Maybe a path component exists
485 if not ignoremissing and '/' in path:
491 if not ignoremissing and '/' in path:
486 d, f = path.rsplit('/', 1)
492 d, f = path.rsplit('/', 1)
487 d = self._normalize(d, False, ignoremissing, None)
493 d = self._normalize(d, False, ignoremissing, None)
488 folded = d + "/" + f
494 folded = d + "/" + f
489 else:
495 else:
490 # No path components, preserve original case
496 # No path components, preserve original case
491 folded = path
497 folded = path
492 else:
498 else:
493 # recursively normalize leading directory components
499 # recursively normalize leading directory components
494 # against dirstate
500 # against dirstate
495 if '/' in normed:
501 if '/' in normed:
496 d, f = normed.rsplit('/', 1)
502 d, f = normed.rsplit('/', 1)
497 d = self._normalize(d, False, ignoremissing, True)
503 d = self._normalize(d, False, ignoremissing, True)
498 r = self._root + "/" + d
504 r = self._root + "/" + d
499 folded = d + "/" + util.fspath(f, r)
505 folded = d + "/" + util.fspath(f, r)
500 else:
506 else:
501 folded = util.fspath(normed, self._root)
507 folded = util.fspath(normed, self._root)
502 storemap[normed] = folded
508 storemap[normed] = folded
503
509
504 return folded
510 return folded
505
511
506 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
512 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
507 normed = util.normcase(path)
513 normed = util.normcase(path)
508 folded = self._filefoldmap.get(normed, None)
514 folded = self._filefoldmap.get(normed, None)
509 if folded is None:
515 if folded is None:
510 if isknown:
516 if isknown:
511 folded = path
517 folded = path
512 else:
518 else:
513 folded = self._discoverpath(path, normed, ignoremissing, exists,
519 folded = self._discoverpath(path, normed, ignoremissing, exists,
514 self._filefoldmap)
520 self._filefoldmap)
515 return folded
521 return folded
516
522
517 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
523 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
518 normed = util.normcase(path)
524 normed = util.normcase(path)
519 folded = self._filefoldmap.get(normed, None)
525 folded = self._filefoldmap.get(normed, None)
520 if folded is None:
526 if folded is None:
521 folded = self._dirfoldmap.get(normed, None)
527 folded = self._dirfoldmap.get(normed, None)
522 if folded is None:
528 if folded is None:
523 if isknown:
529 if isknown:
524 folded = path
530 folded = path
525 else:
531 else:
526 # store discovered result in dirfoldmap so that future
532 # store discovered result in dirfoldmap so that future
527 # normalizefile calls don't start matching directories
533 # normalizefile calls don't start matching directories
528 folded = self._discoverpath(path, normed, ignoremissing, exists,
534 folded = self._discoverpath(path, normed, ignoremissing, exists,
529 self._dirfoldmap)
535 self._dirfoldmap)
530 return folded
536 return folded
531
537
532 def normalize(self, path, isknown=False, ignoremissing=False):
538 def normalize(self, path, isknown=False, ignoremissing=False):
533 '''
539 '''
534 normalize the case of a pathname when on a casefolding filesystem
540 normalize the case of a pathname when on a casefolding filesystem
535
541
536 isknown specifies whether the filename came from walking the
542 isknown specifies whether the filename came from walking the
537 disk, to avoid extra filesystem access.
543 disk, to avoid extra filesystem access.
538
544
539 If ignoremissing is True, missing paths are returned
545 If ignoremissing is True, missing paths are returned
540 unchanged. Otherwise, we try harder to normalize possibly
546 unchanged. Otherwise, we try harder to normalize possibly
541 existing path components.
547 existing path components.
542
548
543 The normalized case is determined based on the following precedence:
549 The normalized case is determined based on the following precedence:
544
550
545 - version of name already stored in the dirstate
551 - version of name already stored in the dirstate
546 - version of name stored on disk
552 - version of name stored on disk
547 - version provided via command arguments
553 - version provided via command arguments
548 '''
554 '''
549
555
550 if self._checkcase:
556 if self._checkcase:
551 return self._normalize(path, isknown, ignoremissing)
557 return self._normalize(path, isknown, ignoremissing)
552 return path
558 return path
553
559
554 def clear(self):
560 def clear(self):
555 self._map = {}
561 self._map = {}
556 if "_dirs" in self.__dict__:
562 if "_dirs" in self.__dict__:
557 delattr(self, "_dirs")
563 delattr(self, "_dirs")
558 self._copymap = {}
564 self._copymap = {}
559 self._pl = [nullid, nullid]
565 self._pl = [nullid, nullid]
560 self._lastnormaltime = 0
566 self._lastnormaltime = 0
561 self._dirty = True
567 self._dirty = True
562
568
563 def rebuild(self, parent, allfiles, changedfiles=None):
569 def rebuild(self, parent, allfiles, changedfiles=None):
564 changedfiles = changedfiles or allfiles
570 changedfiles = changedfiles or allfiles
565 oldmap = self._map
571 oldmap = self._map
566 self.clear()
572 self.clear()
567 for f in allfiles:
573 for f in allfiles:
568 if f not in changedfiles:
574 if f not in changedfiles:
569 self._map[f] = oldmap[f]
575 self._map[f] = oldmap[f]
570 else:
576 else:
571 if 'x' in allfiles.flags(f):
577 if 'x' in allfiles.flags(f):
572 self._map[f] = dirstatetuple('n', 0777, -1, 0)
578 self._map[f] = dirstatetuple('n', 0777, -1, 0)
573 else:
579 else:
574 self._map[f] = dirstatetuple('n', 0666, -1, 0)
580 self._map[f] = dirstatetuple('n', 0666, -1, 0)
575 self._pl = (parent, nullid)
581 self._pl = (parent, nullid)
576 self._dirty = True
582 self._dirty = True
577
583
578 def write(self):
584 def write(self):
579 if not self._dirty:
585 if not self._dirty:
580 return
586 return
581
587
582 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
582 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping the
588 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping the
583 # timestamp of each entry in the dirstate because of 'now > mtime'
589 # timestamp of each entry in the dirstate because of 'now > mtime'
590 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
585 if delaywrite > 0:
591 if delaywrite > 0:
586 import time # to avoid useless import
592 import time # to avoid useless import
587 time.sleep(delaywrite)
593 time.sleep(delaywrite)
588
594
589 st = self._opener("dirstate", "w", atomictemp=True)
595 st = self._opener("dirstate", "w", atomictemp=True)
590 # use the modification time of the newly created temporary file as the
596 # use the modification time of the newly created temporary file as the
591 # filesystem's notion of 'now'
597 # filesystem's notion of 'now'
592 now = util.fstat(st).st_mtime
598 now = util.fstat(st).st_mtime
593 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
599 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
594 st.close()
600 st.close()
595 self._lastnormaltime = 0
601 self._lastnormaltime = 0
596 self._dirty = self._dirtypl = False
602 self._dirty = self._dirtypl = False
597
603
598 def _dirignore(self, f):
604 def _dirignore(self, f):
599 if f == '.':
605 if f == '.':
600 return False
606 return False
601 if self._ignore(f):
607 if self._ignore(f):
602 return True
608 return True
603 for p in util.finddirs(f):
609 for p in util.finddirs(f):
604 if self._ignore(p):
610 if self._ignore(p):
605 return True
611 return True
606 return False
612 return False
607
613
608 def _walkexplicit(self, match, subrepos):
614 def _walkexplicit(self, match, subrepos):
609 '''Get stat data about the files explicitly specified by match.
615 '''Get stat data about the files explicitly specified by match.
610
616
611 Return a triple (results, dirsfound, dirsnotfound).
617 Return a triple (results, dirsfound, dirsnotfound).
612 - results is a mapping from filename to stat result. It also contains
618 - results is a mapping from filename to stat result. It also contains
613 listings mapping subrepos and .hg to None.
619 listings mapping subrepos and .hg to None.
614 - dirsfound is a list of files found to be directories.
620 - dirsfound is a list of files found to be directories.
615 - dirsnotfound is a list of files that the dirstate thinks are
621 - dirsnotfound is a list of files that the dirstate thinks are
616 directories and that were not found.'''
622 directories and that were not found.'''
617
623
618 def badtype(mode):
624 def badtype(mode):
619 kind = _('unknown')
625 kind = _('unknown')
620 if stat.S_ISCHR(mode):
626 if stat.S_ISCHR(mode):
621 kind = _('character device')
627 kind = _('character device')
622 elif stat.S_ISBLK(mode):
628 elif stat.S_ISBLK(mode):
623 kind = _('block device')
629 kind = _('block device')
624 elif stat.S_ISFIFO(mode):
630 elif stat.S_ISFIFO(mode):
625 kind = _('fifo')
631 kind = _('fifo')
626 elif stat.S_ISSOCK(mode):
632 elif stat.S_ISSOCK(mode):
627 kind = _('socket')
633 kind = _('socket')
628 elif stat.S_ISDIR(mode):
634 elif stat.S_ISDIR(mode):
629 kind = _('directory')
635 kind = _('directory')
630 return _('unsupported file type (type is %s)') % kind
636 return _('unsupported file type (type is %s)') % kind
631
637
632 matchedir = match.explicitdir
638 matchedir = match.explicitdir
633 badfn = match.bad
639 badfn = match.bad
634 dmap = self._map
640 dmap = self._map
635 lstat = os.lstat
641 lstat = os.lstat
636 getkind = stat.S_IFMT
642 getkind = stat.S_IFMT
637 dirkind = stat.S_IFDIR
643 dirkind = stat.S_IFDIR
638 regkind = stat.S_IFREG
644 regkind = stat.S_IFREG
639 lnkkind = stat.S_IFLNK
645 lnkkind = stat.S_IFLNK
640 join = self._join
646 join = self._join
641 dirsfound = []
647 dirsfound = []
642 foundadd = dirsfound.append
648 foundadd = dirsfound.append
643 dirsnotfound = []
649 dirsnotfound = []
644 notfoundadd = dirsnotfound.append
650 notfoundadd = dirsnotfound.append
645
651
646 if not match.isexact() and self._checkcase:
652 if not match.isexact() and self._checkcase:
647 normalize = self._normalize
653 normalize = self._normalize
648 else:
654 else:
649 normalize = None
655 normalize = None
650
656
651 files = sorted(match.files())
657 files = sorted(match.files())
652 subrepos.sort()
658 subrepos.sort()
653 i, j = 0, 0
659 i, j = 0, 0
654 while i < len(files) and j < len(subrepos):
660 while i < len(files) and j < len(subrepos):
655 subpath = subrepos[j] + "/"
661 subpath = subrepos[j] + "/"
656 if files[i] < subpath:
662 if files[i] < subpath:
657 i += 1
663 i += 1
658 continue
664 continue
659 while i < len(files) and files[i].startswith(subpath):
665 while i < len(files) and files[i].startswith(subpath):
660 del files[i]
666 del files[i]
661 j += 1
667 j += 1
662
668
663 if not files or '.' in files:
669 if not files or '.' in files:
664 files = ['.']
670 files = ['.']
665 results = dict.fromkeys(subrepos)
671 results = dict.fromkeys(subrepos)
666 results['.hg'] = None
672 results['.hg'] = None
667
673
668 alldirs = None
674 alldirs = None
669 for ff in files:
675 for ff in files:
670 # constructing the foldmap is expensive, so don't do it for the
676 # constructing the foldmap is expensive, so don't do it for the
671 # common case where files is ['.']
677 # common case where files is ['.']
672 if normalize and ff != '.':
678 if normalize and ff != '.':
673 nf = normalize(ff, False, True)
679 nf = normalize(ff, False, True)
674 else:
680 else:
675 nf = ff
681 nf = ff
676 if nf in results:
682 if nf in results:
677 continue
683 continue
678
684
679 try:
685 try:
680 st = lstat(join(nf))
686 st = lstat(join(nf))
681 kind = getkind(st.st_mode)
687 kind = getkind(st.st_mode)
682 if kind == dirkind:
688 if kind == dirkind:
683 if nf in dmap:
689 if nf in dmap:
684 # file replaced by dir on disk but still in dirstate
690 # file replaced by dir on disk but still in dirstate
685 results[nf] = None
691 results[nf] = None
686 if matchedir:
692 if matchedir:
687 matchedir(nf)
693 matchedir(nf)
688 foundadd((nf, ff))
694 foundadd((nf, ff))
689 elif kind == regkind or kind == lnkkind:
695 elif kind == regkind or kind == lnkkind:
690 results[nf] = st
696 results[nf] = st
691 else:
697 else:
692 badfn(ff, badtype(kind))
698 badfn(ff, badtype(kind))
693 if nf in dmap:
699 if nf in dmap:
694 results[nf] = None
700 results[nf] = None
695 except OSError, inst: # nf not found on disk - it is dirstate only
701 except OSError, inst: # nf not found on disk - it is dirstate only
696 if nf in dmap: # does it exactly match a missing file?
702 if nf in dmap: # does it exactly match a missing file?
697 results[nf] = None
703 results[nf] = None
698 else: # does it match a missing directory?
704 else: # does it match a missing directory?
699 if alldirs is None:
705 if alldirs is None:
700 alldirs = util.dirs(dmap)
706 alldirs = util.dirs(dmap)
701 if nf in alldirs:
707 if nf in alldirs:
702 if matchedir:
708 if matchedir:
703 matchedir(nf)
709 matchedir(nf)
704 notfoundadd(nf)
710 notfoundadd(nf)
705 else:
711 else:
706 badfn(ff, inst.strerror)
712 badfn(ff, inst.strerror)
707
713
708 return results, dirsfound, dirsnotfound
714 return results, dirsfound, dirsnotfound
709
715
710 def walk(self, match, subrepos, unknown, ignored, full=True):
716 def walk(self, match, subrepos, unknown, ignored, full=True):
711 '''
717 '''
712 Walk recursively through the directory tree, finding all files
718 Walk recursively through the directory tree, finding all files
713 matched by match.
719 matched by match.
714
720
715 If full is False, maybe skip some known-clean files.
721 If full is False, maybe skip some known-clean files.
716
722
717 Return a dict mapping filename to stat-like object (either
723 Return a dict mapping filename to stat-like object (either
718 mercurial.osutil.stat instance or return value of os.stat()).
724 mercurial.osutil.stat instance or return value of os.stat()).
719
725
720 '''
726 '''
721 # full is a flag that extensions that hook into walk can use -- this
727 # full is a flag that extensions that hook into walk can use -- this
722 # implementation doesn't use it at all. This satisfies the contract
728 # implementation doesn't use it at all. This satisfies the contract
723 # because we only guarantee a "maybe".
729 # because we only guarantee a "maybe".
724
730
725 if ignored:
731 if ignored:
726 ignore = util.never
732 ignore = util.never
727 dirignore = util.never
733 dirignore = util.never
728 elif unknown:
734 elif unknown:
729 ignore = self._ignore
735 ignore = self._ignore
730 dirignore = self._dirignore
736 dirignore = self._dirignore
731 else:
737 else:
732 # if not unknown and not ignored, drop dir recursion and step 2
738 # if not unknown and not ignored, drop dir recursion and step 2
733 ignore = util.always
739 ignore = util.always
734 dirignore = util.always
740 dirignore = util.always
735
741
736 matchfn = match.matchfn
742 matchfn = match.matchfn
737 matchalways = match.always()
743 matchalways = match.always()
738 matchtdir = match.traversedir
744 matchtdir = match.traversedir
739 dmap = self._map
745 dmap = self._map
740 listdir = osutil.listdir
746 listdir = osutil.listdir
741 lstat = os.lstat
747 lstat = os.lstat
742 dirkind = stat.S_IFDIR
748 dirkind = stat.S_IFDIR
743 regkind = stat.S_IFREG
749 regkind = stat.S_IFREG
744 lnkkind = stat.S_IFLNK
750 lnkkind = stat.S_IFLNK
745 join = self._join
751 join = self._join
746
752
747 exact = skipstep3 = False
753 exact = skipstep3 = False
748 if match.isexact(): # match.exact
754 if match.isexact(): # match.exact
749 exact = True
755 exact = True
750 dirignore = util.always # skip step 2
756 dirignore = util.always # skip step 2
751 elif match.files() and not match.anypats(): # match.match, no patterns
757 elif match.files() and not match.anypats(): # match.match, no patterns
752 skipstep3 = True
758 skipstep3 = True
753
759
754 if not exact and self._checkcase:
760 if not exact and self._checkcase:
755 normalize = self._normalize
761 normalize = self._normalize
756 normalizefile = self._normalizefile
762 normalizefile = self._normalizefile
757 skipstep3 = False
763 skipstep3 = False
758 else:
764 else:
759 normalize = self._normalize
765 normalize = self._normalize
760 normalizefile = None
766 normalizefile = None
761
767
762 # step 1: find all explicit files
768 # step 1: find all explicit files
763 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
769 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
764
770
765 skipstep3 = skipstep3 and not (work or dirsnotfound)
771 skipstep3 = skipstep3 and not (work or dirsnotfound)
766 work = [d for d in work if not dirignore(d[0])]
772 work = [d for d in work if not dirignore(d[0])]
767
773
768 # step 2: visit subdirectories
774 # step 2: visit subdirectories
769 def traverse(work, alreadynormed):
775 def traverse(work, alreadynormed):
770 wadd = work.append
776 wadd = work.append
771 while work:
777 while work:
772 nd = work.pop()
778 nd = work.pop()
773 skip = None
779 skip = None
774 if nd == '.':
780 if nd == '.':
775 nd = ''
781 nd = ''
776 else:
782 else:
777 skip = '.hg'
783 skip = '.hg'
778 try:
784 try:
779 entries = listdir(join(nd), stat=True, skip=skip)
785 entries = listdir(join(nd), stat=True, skip=skip)
780 except OSError, inst:
786 except OSError, inst:
781 if inst.errno in (errno.EACCES, errno.ENOENT):
787 if inst.errno in (errno.EACCES, errno.ENOENT):
782 match.bad(self.pathto(nd), inst.strerror)
788 match.bad(self.pathto(nd), inst.strerror)
783 continue
789 continue
784 raise
790 raise
785 for f, kind, st in entries:
791 for f, kind, st in entries:
786 if normalizefile:
792 if normalizefile:
787 # even though f might be a directory, we're only
793 # even though f might be a directory, we're only
788 # interested in comparing it to files currently in the
794 # interested in comparing it to files currently in the
789 # dmap -- therefore normalizefile is enough
795 # dmap -- therefore normalizefile is enough
790 nf = normalizefile(nd and (nd + "/" + f) or f, True,
796 nf = normalizefile(nd and (nd + "/" + f) or f, True,
791 True)
797 True)
792 else:
798 else:
793 nf = nd and (nd + "/" + f) or f
799 nf = nd and (nd + "/" + f) or f
794 if nf not in results:
800 if nf not in results:
795 if kind == dirkind:
801 if kind == dirkind:
796 if not ignore(nf):
802 if not ignore(nf):
797 if matchtdir:
803 if matchtdir:
798 matchtdir(nf)
804 matchtdir(nf)
799 wadd(nf)
805 wadd(nf)
800 if nf in dmap and (matchalways or matchfn(nf)):
806 if nf in dmap and (matchalways or matchfn(nf)):
801 results[nf] = None
807 results[nf] = None
802 elif kind == regkind or kind == lnkkind:
808 elif kind == regkind or kind == lnkkind:
803 if nf in dmap:
809 if nf in dmap:
804 if matchalways or matchfn(nf):
810 if matchalways or matchfn(nf):
805 results[nf] = st
811 results[nf] = st
806 elif ((matchalways or matchfn(nf))
812 elif ((matchalways or matchfn(nf))
807 and not ignore(nf)):
813 and not ignore(nf)):
808 # unknown file -- normalize if necessary
814 # unknown file -- normalize if necessary
809 if not alreadynormed:
815 if not alreadynormed:
810 nf = normalize(nf, False, True)
816 nf = normalize(nf, False, True)
811 results[nf] = st
817 results[nf] = st
812 elif nf in dmap and (matchalways or matchfn(nf)):
818 elif nf in dmap and (matchalways or matchfn(nf)):
813 results[nf] = None
819 results[nf] = None
814
820
815 for nd, d in work:
821 for nd, d in work:
816 # alreadynormed means that processwork doesn't have to do any
822 # alreadynormed means that processwork doesn't have to do any
817 # expensive directory normalization
823 # expensive directory normalization
818 alreadynormed = not normalize or nd == d
824 alreadynormed = not normalize or nd == d
819 traverse([d], alreadynormed)
825 traverse([d], alreadynormed)
820
826
821 for s in subrepos:
827 for s in subrepos:
822 del results[s]
828 del results[s]
823 del results['.hg']
829 del results['.hg']
824
830
825 # step 3: visit remaining files from dmap
831 # step 3: visit remaining files from dmap
826 if not skipstep3 and not exact:
832 if not skipstep3 and not exact:
827 # If a dmap file is not in results yet, it was either
833 # If a dmap file is not in results yet, it was either
828 # a) not matching matchfn b) ignored, c) missing, or d) under a
834 # a) not matching matchfn b) ignored, c) missing, or d) under a
829 # symlink directory.
835 # symlink directory.
830 if not results and matchalways:
836 if not results and matchalways:
831 visit = dmap.keys()
837 visit = dmap.keys()
832 else:
838 else:
833 visit = [f for f in dmap if f not in results and matchfn(f)]
839 visit = [f for f in dmap if f not in results and matchfn(f)]
834 visit.sort()
840 visit.sort()
835
841
836 if unknown:
842 if unknown:
837 # unknown == True means we walked all dirs under the roots
843 # unknown == True means we walked all dirs under the roots
838 # that weren't ignored, and everything that matched was stat'ed
844 # that weren't ignored, and everything that matched was stat'ed
839 # and is already in results.
845 # and is already in results.
840 # The rest must thus be ignored or under a symlink.
846 # The rest must thus be ignored or under a symlink.
841 audit_path = pathutil.pathauditor(self._root)
847 audit_path = pathutil.pathauditor(self._root)
842
848
843 for nf in iter(visit):
849 for nf in iter(visit):
844 # If a stat for the same file was already added with a
850 # If a stat for the same file was already added with a
845 # different case, don't add one for this, since that would
851 # different case, don't add one for this, since that would
846 # make it appear as if the file exists under both names
852 # make it appear as if the file exists under both names
847 # on disk.
853 # on disk.
848 if (normalizefile and
854 if (normalizefile and
849 normalizefile(nf, True, True) in results):
855 normalizefile(nf, True, True) in results):
850 results[nf] = None
856 results[nf] = None
851 # Report ignored items in the dmap as long as they are not
857 # Report ignored items in the dmap as long as they are not
852 # under a symlink directory.
858 # under a symlink directory.
853 elif audit_path.check(nf):
859 elif audit_path.check(nf):
854 try:
860 try:
855 results[nf] = lstat(join(nf))
861 results[nf] = lstat(join(nf))
856 # file was just ignored, no links, and exists
862 # file was just ignored, no links, and exists
857 except OSError:
863 except OSError:
858 # file doesn't exist
864 # file doesn't exist
859 results[nf] = None
865 results[nf] = None
860 else:
866 else:
861 # It's either missing or under a symlink directory
867 # It's either missing or under a symlink directory
862 # which we in this case report as missing
868 # which we in this case report as missing
863 results[nf] = None
869 results[nf] = None
864 else:
870 else:
865 # We may not have walked the full directory tree above,
871 # We may not have walked the full directory tree above,
866 # so stat and check everything we missed.
872 # so stat and check everything we missed.
867 nf = iter(visit).next
873 nf = iter(visit).next
868 for st in util.statfiles([join(i) for i in visit]):
874 for st in util.statfiles([join(i) for i in visit]):
869 results[nf()] = st
875 results[nf()] = st
870 return results
876 return results
871
877
872 def status(self, match, subrepos, ignored, clean, unknown):
878 def status(self, match, subrepos, ignored, clean, unknown):
873 '''Determine the status of the working copy relative to the
879 '''Determine the status of the working copy relative to the
874 dirstate and return a pair of (unsure, status), where status is of type
880 dirstate and return a pair of (unsure, status), where status is of type
875 scmutil.status and:
881 scmutil.status and:
876
882
877 unsure:
883 unsure:
878 files that might have been modified since the dirstate was
884 files that might have been modified since the dirstate was
879 written, but need to be read to be sure (size is the same
885 written, but need to be read to be sure (size is the same
880 but mtime differs)
886 but mtime differs)
881 status.modified:
887 status.modified:
882 files that have definitely been modified since the dirstate
888 files that have definitely been modified since the dirstate
883 was written (different size or mode)
889 was written (different size or mode)
884 status.clean:
890 status.clean:
885 files that have definitely not been modified since the
891 files that have definitely not been modified since the
886 dirstate was written
892 dirstate was written
887 '''
893 '''
888 listignored, listclean, listunknown = ignored, clean, unknown
894 listignored, listclean, listunknown = ignored, clean, unknown
889 lookup, modified, added, unknown, ignored = [], [], [], [], []
895 lookup, modified, added, unknown, ignored = [], [], [], [], []
890 removed, deleted, clean = [], [], []
896 removed, deleted, clean = [], [], []
891
897
892 dmap = self._map
898 dmap = self._map
893 ladd = lookup.append # aka "unsure"
899 ladd = lookup.append # aka "unsure"
894 madd = modified.append
900 madd = modified.append
895 aadd = added.append
901 aadd = added.append
896 uadd = unknown.append
902 uadd = unknown.append
897 iadd = ignored.append
903 iadd = ignored.append
898 radd = removed.append
904 radd = removed.append
899 dadd = deleted.append
905 dadd = deleted.append
900 cadd = clean.append
906 cadd = clean.append
901 mexact = match.exact
907 mexact = match.exact
902 dirignore = self._dirignore
908 dirignore = self._dirignore
903 checkexec = self._checkexec
909 checkexec = self._checkexec
904 copymap = self._copymap
910 copymap = self._copymap
905 lastnormaltime = self._lastnormaltime
911 lastnormaltime = self._lastnormaltime
906
912
907 # We need to do full walks when either
913 # We need to do full walks when either
908 # - we're listing all clean files, or
914 # - we're listing all clean files, or
909 # - match.traversedir does something, because match.traversedir should
915 # - match.traversedir does something, because match.traversedir should
910 # be called for every dir in the working dir
916 # be called for every dir in the working dir
911 full = listclean or match.traversedir is not None
917 full = listclean or match.traversedir is not None
912 for fn, st in self.walk(match, subrepos, listunknown, listignored,
918 for fn, st in self.walk(match, subrepos, listunknown, listignored,
913 full=full).iteritems():
919 full=full).iteritems():
914 if fn not in dmap:
920 if fn not in dmap:
915 if (listignored or mexact(fn)) and dirignore(fn):
921 if (listignored or mexact(fn)) and dirignore(fn):
916 if listignored:
922 if listignored:
917 iadd(fn)
923 iadd(fn)
918 else:
924 else:
919 uadd(fn)
925 uadd(fn)
920 continue
926 continue
921
927
922 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
928 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
923 # written like that for performance reasons. dmap[fn] is not a
929 # written like that for performance reasons. dmap[fn] is not a
924 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
930 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
925 # opcode has fast paths when the value to be unpacked is a tuple or
931 # opcode has fast paths when the value to be unpacked is a tuple or
926 # a list, but falls back to creating a full-fledged iterator in
932 # a list, but falls back to creating a full-fledged iterator in
927 # general. That is much slower than simply accessing and storing the
933 # general. That is much slower than simply accessing and storing the
928 # tuple members one by one.
934 # tuple members one by one.
929 t = dmap[fn]
935 t = dmap[fn]
930 state = t[0]
936 state = t[0]
931 mode = t[1]
937 mode = t[1]
932 size = t[2]
938 size = t[2]
933 time = t[3]
939 time = t[3]
934
940
935 if not st and state in "nma":
941 if not st and state in "nma":
936 dadd(fn)
942 dadd(fn)
937 elif state == 'n':
943 elif state == 'n':
938 mtime = int(st.st_mtime)
944 mtime = int(st.st_mtime)
939 if (size >= 0 and
945 if (size >= 0 and
940 ((size != st.st_size and size != st.st_size & _rangemask)
946 ((size != st.st_size and size != st.st_size & _rangemask)
941 or ((mode ^ st.st_mode) & 0100 and checkexec))
947 or ((mode ^ st.st_mode) & 0100 and checkexec))
942 or size == -2 # other parent
948 or size == -2 # other parent
943 or fn in copymap):
949 or fn in copymap):
944 madd(fn)
950 madd(fn)
945 elif time != mtime and time != mtime & _rangemask:
951 elif time != mtime and time != mtime & _rangemask:
946 ladd(fn)
952 ladd(fn)
947 elif mtime == lastnormaltime:
953 elif mtime == lastnormaltime:
948 # fn may have just been marked as normal and it may have
954 # fn may have just been marked as normal and it may have
949 # changed in the same second without changing its size.
955 # changed in the same second without changing its size.
950 # This can happen if we quickly do multiple commits.
956 # This can happen if we quickly do multiple commits.
951 # Force lookup, so we don't miss such a racy file change.
957 # Force lookup, so we don't miss such a racy file change.
952 ladd(fn)
958 ladd(fn)
953 elif listclean:
959 elif listclean:
954 cadd(fn)
960 cadd(fn)
955 elif state == 'm':
961 elif state == 'm':
956 madd(fn)
962 madd(fn)
957 elif state == 'a':
963 elif state == 'a':
958 aadd(fn)
964 aadd(fn)
959 elif state == 'r':
965 elif state == 'r':
960 radd(fn)
966 radd(fn)
961
967
962 return (lookup, scmutil.status(modified, added, removed, deleted,
968 return (lookup, scmutil.status(modified, added, removed, deleted,
963 unknown, ignored, clean))
969 unknown, ignored, clean))
964
970
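Editor's note: the UNPACK_SEQUENCE comment in the status loop above can be explored with a small, self-contained timing sketch. It is illustrative only and not part of the changeset; `fakedirstatetuple` is an invented stand-in for the C `dirstatetuple` type (a sequence that is neither a tuple nor a list), and the numbers used are arbitrary.

import timeit

setup = '''
class fakedirstatetuple(object):
    # invented stand-in for the C dirstatetuple: a sequence, but not a
    # tuple/list, so CPython's UNPACK_SEQUENCE fast path does not apply
    def __init__(self, *vals):
        self._vals = vals
    def __len__(self):
        return len(self._vals)
    def __getitem__(self, i):
        return self._vals[i]
t = fakedirstatetuple("n", 420, 12, 1000000000)
'''

# unpacking goes through a generic sequence iterator
print(timeit.timeit('state, mode, size, time = t', setup=setup))
# element-by-element access calls __getitem__ directly, without building
# an iterator
print(timeit.timeit('state = t[0]; mode = t[1]; size = t[2]; time = t[3]',
                    setup=setup))

Comparing the two timings on such a sequence type is one way to see why the loop above stores the tuple members one by one.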
965 def matches(self, match):
971 def matches(self, match):
966 '''
972 '''
967 return files in the dirstate (in whatever state) filtered by match
973 return files in the dirstate (in whatever state) filtered by match
968 '''
974 '''
969 dmap = self._map
975 dmap = self._map
970 if match.always():
976 if match.always():
971 return dmap.keys()
977 return dmap.keys()
972 files = match.files()
978 files = match.files()
973 if match.isexact():
979 if match.isexact():
974 # fast path -- filter the other way around, since typically files is
980 # fast path -- filter the other way around, since typically files is
975 # much smaller than dmap
981 # much smaller than dmap
976 return [f for f in files if f in dmap]
982 return [f for f in files if f in dmap]
977 if not match.anypats() and all(fn in dmap for fn in files):
983 if not match.anypats() and all(fn in dmap for fn in files):
978 # fast path -- all the values are known to be files, so just return
984 # fast path -- all the values are known to be files, so just return
979 # that
985 # that
980 return list(files)
986 return list(files)
981 return [f for f in dmap if match(f)]
987 return [f for f in dmap if match(f)]
@@ -1,573 +1,587 b''
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import util, pathutil
9 import util, pathutil
10 from i18n import _
10 from i18n import _
11
11
12 propertycache = util.propertycache
12 propertycache = util.propertycache
13
13
14 def _rematcher(regex):
14 def _rematcher(regex):
15 '''compile the regexp with the best available regexp engine and return a
15 '''compile the regexp with the best available regexp engine and return a
16 matcher function'''
16 matcher function'''
17 m = util.re.compile(regex)
17 m = util.re.compile(regex)
18 try:
18 try:
19 # slightly faster, provided by facebook's re2 bindings
19 # slightly faster, provided by facebook's re2 bindings
20 return m.test_match
20 return m.test_match
21 except AttributeError:
21 except AttributeError:
22 return m.match
22 return m.match
23
23
24 def _expandsets(kindpats, ctx, listsubrepos):
24 def _expandsets(kindpats, ctx, listsubrepos):
25 '''Returns the kindpats list with the 'set' patterns expanded.'''
25 '''Returns the kindpats list with the 'set' patterns expanded.'''
26 fset = set()
26 fset = set()
27 other = []
27 other = []
28
28
29 for kind, pat, source in kindpats:
29 for kind, pat, source in kindpats:
30 if kind == 'set':
30 if kind == 'set':
31 if not ctx:
31 if not ctx:
32 raise util.Abort("fileset expression with no context")
32 raise util.Abort("fileset expression with no context")
33 s = ctx.getfileset(pat)
33 s = ctx.getfileset(pat)
34 fset.update(s)
34 fset.update(s)
35
35
36 if listsubrepos:
36 if listsubrepos:
37 for subpath in ctx.substate:
37 for subpath in ctx.substate:
38 s = ctx.sub(subpath).getfileset(pat)
38 s = ctx.sub(subpath).getfileset(pat)
39 fset.update(subpath + '/' + f for f in s)
39 fset.update(subpath + '/' + f for f in s)
40
40
41 continue
41 continue
42 other.append((kind, pat, source))
42 other.append((kind, pat, source))
43 return fset, other
43 return fset, other
44
44
45 def _kindpatsalwaysmatch(kindpats):
45 def _kindpatsalwaysmatch(kindpats):
46 """Checks whether the kindpats match everything, as e.g.
46 """Checks whether the kindpats match everything, as e.g.
47 'relpath:.' does.
47 'relpath:.' does.
48 """
48 """
49 for kind, pat, source in kindpats:
49 for kind, pat, source in kindpats:
50 if pat != '' or kind not in ['relpath', 'glob']:
50 if pat != '' or kind not in ['relpath', 'glob']:
51 return False
51 return False
52 return True
52 return True
53
53
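Editor's note: a minimal sketch of what _kindpatsalwaysmatch() treats as "matches everything", assuming a Mercurial of this vintage is importable as the mercurial package; the kindpats tuples below are hand-built for illustration rather than produced by _normalize.

from mercurial import match as matchmod

# 'hg status .' style input normalizes to an empty 'relpath' pattern
print(matchmod._kindpatsalwaysmatch([('relpath', '', '')]))   # True
# an empty glob is also unrestricted
print(matchmod._kindpatsalwaysmatch([('glob', '', '')]))      # True
# any non-empty pattern restricts the match
print(matchmod._kindpatsalwaysmatch([('glob', '*.c', '')]))   # False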
54 class match(object):
54 class match(object):
55 def __init__(self, root, cwd, patterns, include=[], exclude=[],
55 def __init__(self, root, cwd, patterns, include=[], exclude=[],
56 default='glob', exact=False, auditor=None, ctx=None,
56 default='glob', exact=False, auditor=None, ctx=None,
57 listsubrepos=False, warn=None):
57 listsubrepos=False, warn=None):
58 """build an object to match a set of file patterns
58 """build an object to match a set of file patterns
59
59
60 arguments:
60 arguments:
61 root - the canonical root of the tree you're matching against
61 root - the canonical root of the tree you're matching against
62 cwd - the current working directory, if relevant
62 cwd - the current working directory, if relevant
63 patterns - patterns to find
63 patterns - patterns to find
64 include - patterns to include (unless they are excluded)
64 include - patterns to include (unless they are excluded)
65 exclude - patterns to exclude (even if they are included)
65 exclude - patterns to exclude (even if they are included)
66 default - if a pattern in patterns has no explicit type, assume this one
66 default - if a pattern in patterns has no explicit type, assume this one
67 exact - patterns are actually filenames (include/exclude still apply)
67 exact - patterns are actually filenames (include/exclude still apply)
68 warn - optional function used for printing warnings
68 warn - optional function used for printing warnings
69
69
70 a pattern is one of:
70 a pattern is one of:
71 'glob:<glob>' - a glob relative to cwd
71 'glob:<glob>' - a glob relative to cwd
72 're:<regexp>' - a regular expression
72 're:<regexp>' - a regular expression
73 'path:<path>' - a path relative to repository root
73 'path:<path>' - a path relative to repository root
74 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
74 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
75 'relpath:<path>' - a path relative to cwd
75 'relpath:<path>' - a path relative to cwd
76 'relre:<regexp>' - a regexp that needn't match the start of a name
76 'relre:<regexp>' - a regexp that needn't match the start of a name
77 'set:<fileset>' - a fileset expression
77 'set:<fileset>' - a fileset expression
78 'include:<path>' - a file of patterns to read and include
78 'include:<path>' - a file of patterns to read and include
79 '<something>' - a pattern of the specified default type
79 '<something>' - a pattern of the specified default type
80 """
80 """
81
81
82 self._root = root
82 self._root = root
83 self._cwd = cwd
83 self._cwd = cwd
84 self._files = [] # exact files and roots of patterns
84 self._files = [] # exact files and roots of patterns
85 self._anypats = bool(include or exclude)
85 self._anypats = bool(include or exclude)
86 self._always = False
86 self._always = False
87 self._pathrestricted = bool(include or exclude or patterns)
87 self._pathrestricted = bool(include or exclude or patterns)
88 self._warn = warn
88 self._warn = warn
89
89
90 matchfns = []
90 matchfns = []
91 if include:
91 if include:
92 kindpats = self._normalize(include, 'glob', root, cwd, auditor)
92 kindpats = self._normalize(include, 'glob', root, cwd, auditor)
93 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
93 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
94 listsubrepos)
94 listsubrepos)
95 matchfns.append(im)
95 matchfns.append(im)
96 if exclude:
96 if exclude:
97 kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
97 kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
98 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
98 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
99 listsubrepos)
99 listsubrepos)
100 matchfns.append(lambda f: not em(f))
100 matchfns.append(lambda f: not em(f))
101 if exact:
101 if exact:
102 if isinstance(patterns, list):
102 if isinstance(patterns, list):
103 self._files = patterns
103 self._files = patterns
104 else:
104 else:
105 self._files = list(patterns)
105 self._files = list(patterns)
106 matchfns.append(self.exact)
106 matchfns.append(self.exact)
107 elif patterns:
107 elif patterns:
108 kindpats = self._normalize(patterns, default, root, cwd, auditor)
108 kindpats = self._normalize(patterns, default, root, cwd, auditor)
109 if not _kindpatsalwaysmatch(kindpats):
109 if not _kindpatsalwaysmatch(kindpats):
110 self._files = _roots(kindpats)
110 self._files = _roots(kindpats)
111 self._anypats = self._anypats or _anypats(kindpats)
111 self._anypats = self._anypats or _anypats(kindpats)
112 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
112 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
113 listsubrepos)
113 listsubrepos)
114 matchfns.append(pm)
114 matchfns.append(pm)
115
115
116 if not matchfns:
116 if not matchfns:
117 m = util.always
117 m = util.always
118 self._always = True
118 self._always = True
119 elif len(matchfns) == 1:
119 elif len(matchfns) == 1:
120 m = matchfns[0]
120 m = matchfns[0]
121 else:
121 else:
122 def m(f):
122 def m(f):
123 for matchfn in matchfns:
123 for matchfn in matchfns:
124 if not matchfn(f):
124 if not matchfn(f):
125 return False
125 return False
126 return True
126 return True
127
127
128 self.matchfn = m
128 self.matchfn = m
129 self._fileroots = set(self._files)
129 self._fileroots = set(self._files)
130
130
131 def __call__(self, fn):
131 def __call__(self, fn):
132 return self.matchfn(fn)
132 return self.matchfn(fn)
133 def __iter__(self):
133 def __iter__(self):
134 for f in self._files:
134 for f in self._files:
135 yield f
135 yield f
136
136
137 # Callbacks related to how the matcher is used by dirstate.walk.
137 # Callbacks related to how the matcher is used by dirstate.walk.
138 # Subscribers to these events must monkeypatch the matcher object.
138 # Subscribers to these events must monkeypatch the matcher object.
139 def bad(self, f, msg):
139 def bad(self, f, msg):
140 '''Callback from dirstate.walk for each explicit file that can't be
140 '''Callback from dirstate.walk for each explicit file that can't be
141 found/accessed, with an error message.'''
141 found/accessed, with an error message.'''
142 pass
142 pass
143
143
144 # If an explicitdir is set, it will be called when an explicitly listed
144 # If an explicitdir is set, it will be called when an explicitly listed
145 # directory is visited.
145 # directory is visited.
146 explicitdir = None
146 explicitdir = None
147
147
148 # If a traversedir is set, it will be called when a directory discovered
148 # If a traversedir is set, it will be called when a directory discovered
149 # by recursive traversal is visited.
149 # by recursive traversal is visited.
150 traversedir = None
150 traversedir = None
151
151
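Editor's note: a hedged sketch of how a dirstate.walk caller wires up these hooks by assigning functions on the matcher instance; the reporting functions and the '/repo' root are invented for illustration.

from mercurial import match as matchmod

def report_bad(f, msg):
    # called for explicit files that cannot be found or accessed
    print('%s: %s' % (f, msg))

visited = []

def on_traverse(d):
    # called for every directory discovered by recursive traversal
    visited.append(d)

m = matchmod.match('/repo', '/repo', [])
m.bad = report_bad
m.traversedir = on_traverse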
152 def abs(self, f):
152 def abs(self, f):
153 '''Convert a repo path back to path that is relative to the root of the
153 '''Convert a repo path back to path that is relative to the root of the
154 matcher.'''
154 matcher.'''
155 return f
155 return f
156
156
157 def rel(self, f):
157 def rel(self, f):
158 '''Convert repo path back to path that is relative to cwd of matcher.'''
158 '''Convert repo path back to path that is relative to cwd of matcher.'''
159 return util.pathto(self._root, self._cwd, f)
159 return util.pathto(self._root, self._cwd, f)
160
160
161 def uipath(self, f):
161 def uipath(self, f):
162 '''Convert repo path to a display path. If patterns or -I/-X were used
162 '''Convert repo path to a display path. If patterns or -I/-X were used
163 to create this matcher, the display path will be relative to cwd.
163 to create this matcher, the display path will be relative to cwd.
164 Otherwise it is relative to the root of the repo.'''
164 Otherwise it is relative to the root of the repo.'''
165 return (self._pathrestricted and self.rel(f)) or self.abs(f)
165 return (self._pathrestricted and self.rel(f)) or self.abs(f)
166
166
167 def files(self):
167 def files(self):
168 '''Explicitly listed files or patterns or roots:
168 '''Explicitly listed files or patterns or roots:
169 if no patterns or .always(): empty list,
169 if no patterns or .always(): empty list,
170 if exact: list exact files,
170 if exact: list exact files,
171 if not .anypats(): list all files and dirs,
171 if not .anypats(): list all files and dirs,
172 else: optimal roots'''
172 else: optimal roots'''
173 return self._files
173 return self._files
174
174
175 @propertycache
175 @propertycache
176 def _dirs(self):
176 def _dirs(self):
177 return set(util.dirs(self._fileroots)) | set(['.'])
177 return set(util.dirs(self._fileroots)) | set(['.'])
178
178
179 def visitdir(self, dir):
179 def visitdir(self, dir):
180 return (not self._fileroots or '.' in self._fileroots or
180 return (not self._fileroots or '.' in self._fileroots or
181 dir in self._fileroots or dir in self._dirs or
181 dir in self._fileroots or dir in self._dirs or
182 any(parentdir in self._fileroots
182 any(parentdir in self._fileroots
183 for parentdir in util.finddirs(dir)))
183 for parentdir in util.finddirs(dir)))
184
184
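Editor's note: visitdir() is what lets dirstate.walk prune directories early. A minimal sketch with an invented repository layout, assuming the mercurial package is importable:

from mercurial import match as matchmod

m = matchmod.match('/repo', '/repo', ['path:src/lib'])
print(m.visitdir('src'))          # True: ancestor of a pattern root
print(m.visitdir('src/lib'))      # True: the pattern root itself
print(m.visitdir('src/lib/sub'))  # True: inside the pattern root
print(m.visitdir('docs'))         # False: can be skipped entirely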
185 def exact(self, f):
185 def exact(self, f):
186 '''Returns True if f is in .files().'''
186 '''Returns True if f is in .files().'''
187 return f in self._fileroots
187 return f in self._fileroots
188
188
189 def anypats(self):
189 def anypats(self):
190 '''Matcher uses patterns or include/exclude.'''
190 '''Matcher uses patterns or include/exclude.'''
191 return self._anypats
191 return self._anypats
192
192
193 def always(self):
193 def always(self):
194 '''Matcher will match everything and .files() will be empty
194 '''Matcher will match everything and .files() will be empty
195 - optimization might be possible and necessary.'''
195 - optimization might be possible and necessary.'''
196 return self._always
196 return self._always
197
197
198 def ispartial(self):
198 def ispartial(self):
199 '''True if the matcher won't always match.
199 '''True if the matcher won't always match.
200
200
201 Although it's just the inverse of _always in this implementation,
201 Although it's just the inverse of _always in this implementation,
202 an extension such as narrowhg might make it return something
202 an extension such as narrowhg might make it return something
203 slightly different.'''
203 slightly different.'''
204 return not self._always
204 return not self._always
205
205
206 def isexact(self):
206 def isexact(self):
207 return self.matchfn == self.exact
207 return self.matchfn == self.exact
208
208
209 def _normalize(self, patterns, default, root, cwd, auditor):
209 def _normalize(self, patterns, default, root, cwd, auditor):
210 '''Convert 'kind:pat' from the patterns list to tuples with kind and
210 '''Convert 'kind:pat' from the patterns list to tuples with kind and
211 normalized and rooted patterns and with listfiles expanded.'''
211 normalized and rooted patterns and with listfiles expanded.'''
212 kindpats = []
212 kindpats = []
213 for kind, pat in [_patsplit(p, default) for p in patterns]:
213 for kind, pat in [_patsplit(p, default) for p in patterns]:
214 if kind in ('glob', 'relpath'):
214 if kind in ('glob', 'relpath'):
215 pat = pathutil.canonpath(root, cwd, pat, auditor)
215 pat = pathutil.canonpath(root, cwd, pat, auditor)
216 elif kind in ('relglob', 'path'):
216 elif kind in ('relglob', 'path'):
217 pat = util.normpath(pat)
217 pat = util.normpath(pat)
218 elif kind in ('listfile', 'listfile0'):
218 elif kind in ('listfile', 'listfile0'):
219 try:
219 try:
220 files = util.readfile(pat)
220 files = util.readfile(pat)
221 if kind == 'listfile0':
221 if kind == 'listfile0':
222 files = files.split('\0')
222 files = files.split('\0')
223 else:
223 else:
224 files = files.splitlines()
224 files = files.splitlines()
225 files = [f for f in files if f]
225 files = [f for f in files if f]
226 except EnvironmentError:
226 except EnvironmentError:
227 raise util.Abort(_("unable to read file list (%s)") % pat)
227 raise util.Abort(_("unable to read file list (%s)") % pat)
228 for k, p, source in self._normalize(files, default, root, cwd,
228 for k, p, source in self._normalize(files, default, root, cwd,
229 auditor):
229 auditor):
230 kindpats.append((k, p, pat))
230 kindpats.append((k, p, pat))
231 continue
231 continue
232 elif kind == 'include':
232 elif kind == 'include':
233 try:
233 try:
234 includepats = readpatternfile(pat, self._warn)
234 includepats = readpatternfile(pat, self._warn)
235 for k, p, source in self._normalize(includepats, default,
235 for k, p, source in self._normalize(includepats, default,
236 root, cwd, auditor):
236 root, cwd, auditor):
237 kindpats.append((k, p, source or pat))
237 kindpats.append((k, p, source or pat))
238 except util.Abort, inst:
238 except util.Abort, inst:
239 raise util.Abort('%s: %s' % (pat, inst[0]))
239 raise util.Abort('%s: %s' % (pat, inst[0]))
240 except IOError, inst:
240 except IOError, inst:
241 if self._warn:
241 if self._warn:
242 self._warn(_("skipping unreadable pattern file "
242 self._warn(_("skipping unreadable pattern file "
243 "'%s': %s\n") % (pat, inst.strerror))
243 "'%s': %s\n") % (pat, inst.strerror))
244 continue
244 continue
245 # else: re or relre - which cannot be normalized
245 # else: re or relre - which cannot be normalized
246 kindpats.append((kind, pat, ''))
246 kindpats.append((kind, pat, ''))
247 return kindpats
247 return kindpats
248
248
249 def exact(root, cwd, files):
249 def exact(root, cwd, files):
250 return match(root, cwd, files, exact=True)
250 return match(root, cwd, files, exact=True)
251
251
252 def always(root, cwd):
252 def always(root, cwd):
253 return match(root, cwd, [])
253 return match(root, cwd, [])
254
254
255 class narrowmatcher(match):
255 class narrowmatcher(match):
256 """Adapt a matcher to work on a subdirectory only.
256 """Adapt a matcher to work on a subdirectory only.
257
257
258 The paths are remapped to remove/insert the path as needed:
258 The paths are remapped to remove/insert the path as needed:
259
259
260 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
260 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
261 >>> m2 = narrowmatcher('sub', m1)
261 >>> m2 = narrowmatcher('sub', m1)
262 >>> bool(m2('a.txt'))
262 >>> bool(m2('a.txt'))
263 False
263 False
264 >>> bool(m2('b.txt'))
264 >>> bool(m2('b.txt'))
265 True
265 True
266 >>> bool(m2.matchfn('a.txt'))
266 >>> bool(m2.matchfn('a.txt'))
267 False
267 False
268 >>> bool(m2.matchfn('b.txt'))
268 >>> bool(m2.matchfn('b.txt'))
269 True
269 True
270 >>> m2.files()
270 >>> m2.files()
271 ['b.txt']
271 ['b.txt']
272 >>> m2.exact('b.txt')
272 >>> m2.exact('b.txt')
273 True
273 True
274 >>> util.pconvert(m2.rel('b.txt'))
274 >>> util.pconvert(m2.rel('b.txt'))
275 'sub/b.txt'
275 'sub/b.txt'
276 >>> def bad(f, msg):
276 >>> def bad(f, msg):
277 ... print "%s: %s" % (f, msg)
277 ... print "%s: %s" % (f, msg)
278 >>> m1.bad = bad
278 >>> m1.bad = bad
279 >>> m2.bad('x.txt', 'No such file')
279 >>> m2.bad('x.txt', 'No such file')
280 sub/x.txt: No such file
280 sub/x.txt: No such file
281 >>> m2.abs('c.txt')
281 >>> m2.abs('c.txt')
282 'sub/c.txt'
282 'sub/c.txt'
283 """
283 """
284
284
285 def __init__(self, path, matcher):
285 def __init__(self, path, matcher):
286 self._root = matcher._root
286 self._root = matcher._root
287 self._cwd = matcher._cwd
287 self._cwd = matcher._cwd
288 self._path = path
288 self._path = path
289 self._matcher = matcher
289 self._matcher = matcher
290 self._always = matcher._always
290 self._always = matcher._always
291 self._pathrestricted = matcher._pathrestricted
291 self._pathrestricted = matcher._pathrestricted
292
292
293 self._files = [f[len(path) + 1:] for f in matcher._files
293 self._files = [f[len(path) + 1:] for f in matcher._files
294 if f.startswith(path + "/")]
294 if f.startswith(path + "/")]
295
295
296 # If the parent repo had a path to this subrepo and no patterns are
296 # If the parent repo had a path to this subrepo and no patterns are
297 # specified, this submatcher always matches.
297 # specified, this submatcher always matches.
298 if not self._always and not matcher._anypats:
298 if not self._always and not matcher._anypats:
299 self._always = any(f == path for f in matcher._files)
299 self._always = any(f == path for f in matcher._files)
300
300
301 self._anypats = matcher._anypats
301 self._anypats = matcher._anypats
302 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
302 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
303 self._fileroots = set(self._files)
303 self._fileroots = set(self._files)
304
304
305 def abs(self, f):
305 def abs(self, f):
306 return self._matcher.abs(self._path + "/" + f)
306 return self._matcher.abs(self._path + "/" + f)
307
307
308 def bad(self, f, msg):
308 def bad(self, f, msg):
309 self._matcher.bad(self._path + "/" + f, msg)
309 self._matcher.bad(self._path + "/" + f, msg)
310
310
311 def rel(self, f):
311 def rel(self, f):
312 return self._matcher.rel(self._path + "/" + f)
312 return self._matcher.rel(self._path + "/" + f)
313
313
314 class icasefsmatcher(match):
314 class icasefsmatcher(match):
315 """A matcher for wdir on case insensitive filesystems, which normalizes the
315 """A matcher for wdir on case insensitive filesystems, which normalizes the
316 given patterns to the case in the filesystem.
316 given patterns to the case in the filesystem.
317 """
317 """
318
318
319 def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
319 def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
320 ctx, listsubrepos=False):
320 ctx, listsubrepos=False):
321 init = super(icasefsmatcher, self).__init__
321 init = super(icasefsmatcher, self).__init__
322 self._dsnormalize = ctx.repo().dirstate.normalize
322 self._dsnormalize = ctx.repo().dirstate.normalize
323
323
324 init(root, cwd, patterns, include, exclude, default, auditor=auditor,
324 init(root, cwd, patterns, include, exclude, default, auditor=auditor,
325 ctx=ctx, listsubrepos=listsubrepos)
325 ctx=ctx, listsubrepos=listsubrepos)
326
326
327 # m.exact(file) must be based off of the actual user input, otherwise
327 # m.exact(file) must be based off of the actual user input, otherwise
328 # inexact case matches are treated as exact, and not noted without -v.
328 # inexact case matches are treated as exact, and not noted without -v.
329 if self._files:
329 if self._files:
330 self._fileroots = set(_roots(self._kp))
330 self._fileroots = set(_roots(self._kp))
331
331
332 def _normalize(self, patterns, default, root, cwd, auditor):
332 def _normalize(self, patterns, default, root, cwd, auditor):
333 self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
333 self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
334 root, cwd, auditor)
334 root, cwd, auditor)
335 kindpats = []
335 kindpats = []
336 for kind, pats, source in self._kp:
336 for kind, pats, source in self._kp:
337 if kind not in ('re', 'relre'): # regex can't be normalized
337 if kind not in ('re', 'relre'): # regex can't be normalized
338 pats = self._dsnormalize(pats)
338 pats = self._dsnormalize(pats)
339 kindpats.append((kind, pats, source))
339 kindpats.append((kind, pats, source))
340 return kindpats
340 return kindpats
341
341
342 def patkind(pattern, default=None):
342 def patkind(pattern, default=None):
343 '''If pattern is 'kind:pat' with a known kind, return kind.'''
343 '''If pattern is 'kind:pat' with a known kind, return kind.'''
344 return _patsplit(pattern, default)[0]
344 return _patsplit(pattern, default)[0]
345
345
346 def _patsplit(pattern, default):
346 def _patsplit(pattern, default):
347 """Split a string into the optional pattern kind prefix and the actual
347 """Split a string into the optional pattern kind prefix and the actual
348 pattern."""
348 pattern."""
349 if ':' in pattern:
349 if ':' in pattern:
350 kind, pat = pattern.split(':', 1)
350 kind, pat = pattern.split(':', 1)
351 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
351 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
352 'listfile', 'listfile0', 'set', 'include'):
352 'listfile', 'listfile0', 'set', 'include'):
353 return kind, pat
353 return kind, pat
354 return default, pattern
354 return default, pattern
355
355
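Editor's note: a few hedged examples of how patkind() and _patsplit() interpret their input (assumes the mercurial package is importable; the patterns are made up):

from mercurial import match as matchmod

print(matchmod.patkind(r're:.*\.py$'))          # 're'
print(matchmod.patkind('foo/bar'))              # None: no known prefix
print(matchmod.patkind('foo/bar', 'glob'))      # 'glob': falls back to default
print(matchmod._patsplit('glob:*.c', None))     # ('glob', '*.c')
print(matchmod._patsplit('unknown:x', 'glob'))  # ('glob', 'unknown:x')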
356 def _globre(pat):
356 def _globre(pat):
357 r'''Convert an extended glob string to a regexp string.
357 r'''Convert an extended glob string to a regexp string.
358
358
359 >>> print _globre(r'?')
359 >>> print _globre(r'?')
360 .
360 .
361 >>> print _globre(r'*')
361 >>> print _globre(r'*')
362 [^/]*
362 [^/]*
363 >>> print _globre(r'**')
363 >>> print _globre(r'**')
364 .*
364 .*
365 >>> print _globre(r'**/a')
365 >>> print _globre(r'**/a')
366 (?:.*/)?a
366 (?:.*/)?a
367 >>> print _globre(r'a/**/b')
367 >>> print _globre(r'a/**/b')
368 a\/(?:.*/)?b
368 a\/(?:.*/)?b
369 >>> print _globre(r'[a*?!^][^b][!c]')
369 >>> print _globre(r'[a*?!^][^b][!c]')
370 [a*?!^][\^b][^c]
370 [a*?!^][\^b][^c]
371 >>> print _globre(r'{a,b}')
371 >>> print _globre(r'{a,b}')
372 (?:a|b)
372 (?:a|b)
373 >>> print _globre(r'.\*\?')
373 >>> print _globre(r'.\*\?')
374 \.\*\?
374 \.\*\?
375 '''
375 '''
376 i, n = 0, len(pat)
376 i, n = 0, len(pat)
377 res = ''
377 res = ''
378 group = 0
378 group = 0
379 escape = util.re.escape
379 escape = util.re.escape
380 def peek():
380 def peek():
381 return i < n and pat[i]
381 return i < n and pat[i]
382 while i < n:
382 while i < n:
383 c = pat[i]
383 c = pat[i]
384 i += 1
384 i += 1
385 if c not in '*?[{},\\':
385 if c not in '*?[{},\\':
386 res += escape(c)
386 res += escape(c)
387 elif c == '*':
387 elif c == '*':
388 if peek() == '*':
388 if peek() == '*':
389 i += 1
389 i += 1
390 if peek() == '/':
390 if peek() == '/':
391 i += 1
391 i += 1
392 res += '(?:.*/)?'
392 res += '(?:.*/)?'
393 else:
393 else:
394 res += '.*'
394 res += '.*'
395 else:
395 else:
396 res += '[^/]*'
396 res += '[^/]*'
397 elif c == '?':
397 elif c == '?':
398 res += '.'
398 res += '.'
399 elif c == '[':
399 elif c == '[':
400 j = i
400 j = i
401 if j < n and pat[j] in '!]':
401 if j < n and pat[j] in '!]':
402 j += 1
402 j += 1
403 while j < n and pat[j] != ']':
403 while j < n and pat[j] != ']':
404 j += 1
404 j += 1
405 if j >= n:
405 if j >= n:
406 res += '\\['
406 res += '\\['
407 else:
407 else:
408 stuff = pat[i:j].replace('\\','\\\\')
408 stuff = pat[i:j].replace('\\','\\\\')
409 i = j + 1
409 i = j + 1
410 if stuff[0] == '!':
410 if stuff[0] == '!':
411 stuff = '^' + stuff[1:]
411 stuff = '^' + stuff[1:]
412 elif stuff[0] == '^':
412 elif stuff[0] == '^':
413 stuff = '\\' + stuff
413 stuff = '\\' + stuff
414 res = '%s[%s]' % (res, stuff)
414 res = '%s[%s]' % (res, stuff)
415 elif c == '{':
415 elif c == '{':
416 group += 1
416 group += 1
417 res += '(?:'
417 res += '(?:'
418 elif c == '}' and group:
418 elif c == '}' and group:
419 res += ')'
419 res += ')'
420 group -= 1
420 group -= 1
421 elif c == ',' and group:
421 elif c == ',' and group:
422 res += '|'
422 res += '|'
423 elif c == '\\':
423 elif c == '\\':
424 p = peek()
424 p = peek()
425 if p:
425 if p:
426 i += 1
426 i += 1
427 res += escape(p)
427 res += escape(p)
428 else:
428 else:
429 res += escape(c)
429 res += escape(c)
430 else:
430 else:
431 res += escape(c)
431 res += escape(c)
432 return res
432 return res
433
433
434 def _regex(kind, pat, globsuffix):
434 def _regex(kind, pat, globsuffix):
435 '''Convert a (normalized) pattern of any kind into a regular expression.
435 '''Convert a (normalized) pattern of any kind into a regular expression.
436 globsuffix is appended to the regexp of globs.'''
436 globsuffix is appended to the regexp of globs.'''
437 if not pat:
437 if not pat:
438 return ''
438 return ''
439 if kind == 're':
439 if kind == 're':
440 return pat
440 return pat
441 if kind == 'path':
441 if kind == 'path':
442 return '^' + util.re.escape(pat) + '(?:/|$)'
442 return '^' + util.re.escape(pat) + '(?:/|$)'
443 if kind == 'relglob':
443 if kind == 'relglob':
444 return '(?:|.*/)' + _globre(pat) + globsuffix
444 return '(?:|.*/)' + _globre(pat) + globsuffix
445 if kind == 'relpath':
445 if kind == 'relpath':
446 return util.re.escape(pat) + '(?:/|$)'
446 return util.re.escape(pat) + '(?:/|$)'
447 if kind == 'relre':
447 if kind == 'relre':
448 if pat.startswith('^'):
448 if pat.startswith('^'):
449 return pat
449 return pat
450 return '.*' + pat
450 return '.*' + pat
451 return _globre(pat) + globsuffix
451 return _globre(pat) + globsuffix
452
452
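Editor's note: illustrative expansions of _regex() for a few kinds; globsuffix is '$' for plain patterns and '(?:/|$)' for includes/excludes. The expected strings in the comments are hand-checked sketches (exact escaping follows re.escape), not doctest output.

from mercurial import match as matchmod

print(matchmod._regex('path', 'docs', '$'))     # ^docs(?:/|$)
print(matchmod._regex('relglob', '*.o', '$'))   # (?:|.*/)[^/]*\.o$
print(matchmod._regex('relre', 'foo', '$'))     # .*foo
print(matchmod._regex('glob', 'src/*.c', '$'))  # src\/[^/]*\.c$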
453 def _buildmatch(ctx, kindpats, globsuffix, listsubrepos):
453 def _buildmatch(ctx, kindpats, globsuffix, listsubrepos):
454 '''Return regexp string and a matcher function for kindpats.
454 '''Return regexp string and a matcher function for kindpats.
455 globsuffix is appended to the regexp of globs.'''
455 globsuffix is appended to the regexp of globs.'''
456 fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
456 fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
457 if not kindpats:
457 if not kindpats:
458 return "", fset.__contains__
458 return "", fset.__contains__
459
459
460 regex, mf = _buildregexmatch(kindpats, globsuffix)
460 regex, mf = _buildregexmatch(kindpats, globsuffix)
461 if fset:
461 if fset:
462 return regex, lambda f: f in fset or mf(f)
462 return regex, lambda f: f in fset or mf(f)
463 return regex, mf
463 return regex, mf
464
464
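Editor's note: a small sketch of _buildmatch() on pre-normalized kindpats. There are no 'set:' patterns, so ctx can be None; globsuffix '$' is what match.__init__ uses for plain patterns. File names are invented.

from mercurial import match as matchmod

kindpats = [('glob', '*.py', ''), ('path', 'docs', '')]
regex, mf = matchmod._buildmatch(None, kindpats, '$', False)
print(regex)                 # (?:[^/]*\.py$|^docs(?:/|$))
print(mf('setup.py'))        # True
print(mf('docs/index.txt'))  # True
print(mf('README'))          # False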
465 def _buildregexmatch(kindpats, globsuffix):
465 def _buildregexmatch(kindpats, globsuffix):
466 """Build a match function from a list of kinds and kindpats,
466 """Build a match function from a list of kinds and kindpats,
467 return regexp string and a matcher function."""
467 return regexp string and a matcher function."""
468 try:
468 try:
469 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
469 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
470 for (k, p, s) in kindpats])
470 for (k, p, s) in kindpats])
471 if len(regex) > 20000:
471 if len(regex) > 20000:
472 raise OverflowError
472 raise OverflowError
473 return regex, _rematcher(regex)
473 return regex, _rematcher(regex)
474 except OverflowError:
474 except OverflowError:
475 # We're using a Python with a tiny regex engine and we
475 # We're using a Python with a tiny regex engine and we
476 # made it explode, so we'll divide the pattern list in two
476 # made it explode, so we'll divide the pattern list in two
477 # until it works
477 # until it works
478 l = len(kindpats)
478 l = len(kindpats)
479 if l < 2:
479 if l < 2:
480 raise
480 raise
481 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
481 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
482 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
482 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
483 return regex, lambda s: a(s) or b(s)
483 return regex, lambda s: a(s) or b(s)
484 except re.error:
484 except re.error:
485 for k, p, s in kindpats:
485 for k, p, s in kindpats:
486 try:
486 try:
487 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
487 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
488 except re.error:
488 except re.error:
489 if s:
489 if s:
490 raise util.Abort(_("%s: invalid pattern (%s): %s") %
490 raise util.Abort(_("%s: invalid pattern (%s): %s") %
491 (s, k, p))
491 (s, k, p))
492 else:
492 else:
493 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
493 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
494 raise util.Abort(_("invalid pattern"))
494 raise util.Abort(_("invalid pattern"))
495
495
496 def _roots(kindpats):
496 def _roots(kindpats):
497 '''return roots and exact explicitly listed files from patterns
497 '''return roots and exact explicitly listed files from patterns
498
498
499 >>> _roots([('glob', 'g/*', ''), ('glob', 'g', ''), ('glob', 'g*', '')])
499 >>> _roots([('glob', 'g/*', ''), ('glob', 'g', ''), ('glob', 'g*', '')])
500 ['g', 'g', '.']
500 ['g', 'g', '.']
501 >>> _roots([('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
501 >>> _roots([('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
502 ['r', 'p/p', '.']
502 ['r', 'p/p', '.']
503 >>> _roots([('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
503 >>> _roots([('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
504 ['.', '.', '.']
504 ['.', '.', '.']
505 '''
505 '''
506 r = []
506 r = []
507 for kind, pat, source in kindpats:
507 for kind, pat, source in kindpats:
508 if kind == 'glob': # find the non-glob prefix
508 if kind == 'glob': # find the non-glob prefix
509 root = []
509 root = []
510 for p in pat.split('/'):
510 for p in pat.split('/'):
511 if '[' in p or '{' in p or '*' in p or '?' in p:
511 if '[' in p or '{' in p or '*' in p or '?' in p:
512 break
512 break
513 root.append(p)
513 root.append(p)
514 r.append('/'.join(root) or '.')
514 r.append('/'.join(root) or '.')
515 elif kind in ('relpath', 'path'):
515 elif kind in ('relpath', 'path'):
516 r.append(pat or '.')
516 r.append(pat or '.')
517 else: # relglob, re, relre
517 else: # relglob, re, relre
518 r.append('.')
518 r.append('.')
519 return r
519 return r
520
520
521 def _anypats(kindpats):
521 def _anypats(kindpats):
522 for kind, pat, source in kindpats:
522 for kind, pat, source in kindpats:
523 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
523 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
524 return True
524 return True
525
525
526 _commentre = None
526 _commentre = None
527
527
528 def readpatternfile(filepath, warn):
528 def readpatternfile(filepath, warn):
529 '''parse a pattern file, returning a list of
529 '''parse a pattern file, returning a list of
530 patterns. These patterns should be given to compile()
530 patterns. These patterns should be given to compile()
531 to be validated and converted into a match function.'''
531 to be validated and converted into a match function.
532
533 trailing white space is dropped.
534 the escape character is backslash.
535 comments start with #.
536 empty lines are skipped.
537
538 lines can be of the following formats:
539
540 syntax: regexp # defaults following lines to non-rooted regexps
541 syntax: glob # defaults following lines to non-rooted globs
542 re:pattern # non-rooted regular expression
543 glob:pattern # non-rooted glob
544 pattern # pattern of the current default type'''
545
532 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
546 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
533 'include': 'include'}
547 'include': 'include'}
534 syntax = 'relre:'
548 syntax = 'relre:'
535 patterns = []
549 patterns = []
536
550
537 fp = open(filepath)
551 fp = open(filepath)
538 for line in fp:
552 for line in fp:
539 if "#" in line:
553 if "#" in line:
540 global _commentre
554 global _commentre
541 if not _commentre:
555 if not _commentre:
542 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
556 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
543 # remove comments prefixed by an even number of escapes
557 # remove comments prefixed by an even number of escapes
544 line = _commentre.sub(r'\1', line)
558 line = _commentre.sub(r'\1', line)
545 # fixup properly escaped comments that survived the above
559 # fixup properly escaped comments that survived the above
546 line = line.replace("\\#", "#")
560 line = line.replace("\\#", "#")
547 line = line.rstrip()
561 line = line.rstrip()
548 if not line:
562 if not line:
549 continue
563 continue
550
564
551 if line.startswith('syntax:'):
565 if line.startswith('syntax:'):
552 s = line[7:].strip()
566 s = line[7:].strip()
553 try:
567 try:
554 syntax = syntaxes[s]
568 syntax = syntaxes[s]
555 except KeyError:
569 except KeyError:
556 if warn:
570 if warn:
557 warn(_("%s: ignoring invalid syntax '%s'\n") %
571 warn(_("%s: ignoring invalid syntax '%s'\n") %
558 (filepath, s))
572 (filepath, s))
559 continue
573 continue
560
574
561 linesyntax = syntax
575 linesyntax = syntax
562 for s, rels in syntaxes.iteritems():
576 for s, rels in syntaxes.iteritems():
563 if line.startswith(rels):
577 if line.startswith(rels):
564 linesyntax = rels
578 linesyntax = rels
565 line = line[len(rels):]
579 line = line[len(rels):]
566 break
580 break
567 elif line.startswith(s+':'):
581 elif line.startswith(s+':'):
568 linesyntax = rels
582 linesyntax = rels
569 line = line[len(s) + 1:]
583 line = line[len(s) + 1:]
570 break
584 break
571 patterns.append(linesyntax + line)
585 patterns.append(linesyntax + line)
572 fp.close()
586 fp.close()
573 return patterns
587 return patterns
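Editor's note: a hedged end-to-end sketch of the pattern-file format described in the readpatternfile docstring above. The file name 'exampleignore' is invented and this snippet is not from the test suite; it assumes the mercurial package is importable.

from mercurial import match as matchmod

with open('exampleignore', 'w') as fp:
    fp.write('syntax: glob\n'
             '*.o        # trailing comment, stripped\n'
             're:^build/\n')
print(matchmod.readpatternfile('exampleignore', warn=None))
# -> ['relglob:*.o', 'relre:^build/']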
@@ -1,191 +1,196 b''
1 $ hg init
1 $ hg init
2
2
3 Issue562: .hgignore requires newline at end:
3 Issue562: .hgignore requires newline at end:
4
4
5 $ touch foo
5 $ touch foo
6 $ touch bar
6 $ touch bar
7 $ touch baz
7 $ touch baz
8 $ cat > makeignore.py <<EOF
8 $ cat > makeignore.py <<EOF
9 > f = open(".hgignore", "w")
9 > f = open(".hgignore", "w")
10 > f.write("ignore\n")
10 > f.write("ignore\n")
11 > f.write("foo\n")
11 > f.write("foo\n")
12 > # No EOL here
12 > # No EOL here
13 > f.write("bar")
13 > f.write("bar")
14 > f.close()
14 > f.close()
15 > EOF
15 > EOF
16
16
17 $ python makeignore.py
17 $ python makeignore.py
18
18
19 Should display baz only:
19 Should display baz only:
20
20
21 $ hg status
21 $ hg status
22 ? baz
22 ? baz
23
23
24 $ rm foo bar baz .hgignore makeignore.py
24 $ rm foo bar baz .hgignore makeignore.py
25
25
26 $ touch a.o
26 $ touch a.o
27 $ touch a.c
27 $ touch a.c
28 $ touch syntax
28 $ touch syntax
29 $ mkdir dir
29 $ mkdir dir
30 $ touch dir/a.o
30 $ touch dir/a.o
31 $ touch dir/b.o
31 $ touch dir/b.o
32 $ touch dir/c.o
32 $ touch dir/c.o
33
33
34 $ hg add dir/a.o
34 $ hg add dir/a.o
35 $ hg commit -m 0
35 $ hg commit -m 0
36 $ hg add dir/b.o
36 $ hg add dir/b.o
37
37
38 $ hg status
38 $ hg status
39 A dir/b.o
39 A dir/b.o
40 ? a.c
40 ? a.c
41 ? a.o
41 ? a.o
42 ? dir/c.o
42 ? dir/c.o
43 ? syntax
43 ? syntax
44
44
45 $ echo "*.o" > .hgignore
45 $ echo "*.o" > .hgignore
46 $ hg status
46 $ hg status
47 abort: $TESTTMP/.hgignore: invalid pattern (relre): *.o (glob)
47 abort: $TESTTMP/.hgignore: invalid pattern (relre): *.o (glob)
48 [255]
48 [255]
49
49
50 $ echo ".*\.o" > .hgignore
50 $ echo ".*\.o" > .hgignore
51 $ hg status
51 $ hg status
52 A dir/b.o
52 A dir/b.o
53 ? .hgignore
53 ? .hgignore
54 ? a.c
54 ? a.c
55 ? syntax
55 ? syntax
56
56
57 Check it does not ignore the current directory '.':
57 Check it does not ignore the current directory '.':
58
58
59 $ echo "^\." > .hgignore
59 $ echo "^\." > .hgignore
60 $ hg status
60 $ hg status
61 A dir/b.o
61 A dir/b.o
62 ? a.c
62 ? a.c
63 ? a.o
63 ? a.o
64 ? dir/c.o
64 ? dir/c.o
65 ? syntax
65 ? syntax
66
66
67 Test that patterns from ui.ignore options are read:
67 Test that patterns from ui.ignore options are read:
68
68
69 $ echo > .hgignore
69 $ echo > .hgignore
70 $ cat >> $HGRCPATH << EOF
70 $ cat >> $HGRCPATH << EOF
71 > [ui]
71 > [ui]
72 > ignore.other = $TESTTMP/.hg/testhgignore
72 > ignore.other = $TESTTMP/.hg/testhgignore
73 > EOF
73 > EOF
74 $ echo "glob:**.o" > .hg/testhgignore
74 $ echo "glob:**.o" > .hg/testhgignore
75 $ hg status
75 $ hg status
76 A dir/b.o
76 A dir/b.o
77 ? .hgignore
77 ? .hgignore
78 ? a.c
78 ? a.c
79 ? syntax
79 ? syntax
80
80
81 empty out testhgignore
81 empty out testhgignore
82 $ echo > .hg/testhgignore
82 $ echo > .hg/testhgignore
83
83
84 Test relative ignore path (issue4473):
84 Test relative ignore path (issue4473):
85
85
86 $ cat >> $HGRCPATH << EOF
86 $ cat >> $HGRCPATH << EOF
87 > [ui]
87 > [ui]
88 > ignore.relative = .hg/testhgignorerel
88 > ignore.relative = .hg/testhgignorerel
89 > EOF
89 > EOF
90 $ echo "glob:*.o" > .hg/testhgignorerel
90 $ echo "glob:*.o" > .hg/testhgignorerel
91 $ cd dir
91 $ cd dir
92 $ hg status
92 $ hg status
93 A dir/b.o
93 A dir/b.o
94 ? .hgignore
94 ? .hgignore
95 ? a.c
95 ? a.c
96 ? syntax
96 ? syntax
97
97
98 $ cd ..
98 $ cd ..
99 $ echo > .hg/testhgignorerel
99 $ echo > .hg/testhgignorerel
100 $ echo "syntax: glob" > .hgignore
100 $ echo "syntax: glob" > .hgignore
101 $ echo "re:.*\.o" >> .hgignore
101 $ echo "re:.*\.o" >> .hgignore
102 $ hg status
102 $ hg status
103 A dir/b.o
103 A dir/b.o
104 ? .hgignore
104 ? .hgignore
105 ? a.c
105 ? a.c
106 ? syntax
106 ? syntax
107
107
108 $ echo "syntax: invalid" > .hgignore
108 $ echo "syntax: invalid" > .hgignore
109 $ hg status
109 $ hg status
110 $TESTTMP/.hgignore: ignoring invalid syntax 'invalid' (glob)
110 $TESTTMP/.hgignore: ignoring invalid syntax 'invalid' (glob)
111 A dir/b.o
111 A dir/b.o
112 ? .hgignore
112 ? .hgignore
113 ? a.c
113 ? a.c
114 ? a.o
114 ? a.o
115 ? dir/c.o
115 ? dir/c.o
116 ? syntax
116 ? syntax
117
117
118 $ echo "syntax: glob" > .hgignore
118 $ echo "syntax: glob" > .hgignore
119 $ echo "*.o" >> .hgignore
119 $ echo "*.o" >> .hgignore
120 $ hg status
120 $ hg status
121 A dir/b.o
121 A dir/b.o
122 ? .hgignore
122 ? .hgignore
123 ? a.c
123 ? a.c
124 ? syntax
124 ? syntax
125
125
126 $ echo "relglob:syntax*" > .hgignore
126 $ echo "relglob:syntax*" > .hgignore
127 $ hg status
127 $ hg status
128 A dir/b.o
128 A dir/b.o
129 ? .hgignore
129 ? .hgignore
130 ? a.c
130 ? a.c
131 ? a.o
131 ? a.o
132 ? dir/c.o
132 ? dir/c.o
133
133
134 $ echo "relglob:*" > .hgignore
134 $ echo "relglob:*" > .hgignore
135 $ hg status
135 $ hg status
136 A dir/b.o
136 A dir/b.o
137
137
138 $ cd dir
138 $ cd dir
139 $ hg status .
139 $ hg status .
140 A b.o
140 A b.o
141
141
142 $ hg debugignore
142 $ hg debugignore
143 (?:(?:|.*/)[^/]*(?:/|$))
143 (?:(?:|.*/)[^/]*(?:/|$))
144
144
145 $ cd ..
145 $ cd ..
146
146
147 Check patterns that match only the directory
147 Check patterns that match only the directory
148
148
149 $ echo "^dir\$" > .hgignore
149 $ echo "^dir\$" > .hgignore
150 $ hg status
150 $ hg status
151 A dir/b.o
151 A dir/b.o
152 ? .hgignore
152 ? .hgignore
153 ? a.c
153 ? a.c
154 ? a.o
154 ? a.o
155 ? syntax
155 ? syntax
156
156
157 Check recursive glob pattern matches no directories (dir/**/c.o matches dir/c.o)
157 Check recursive glob pattern matches no directories (dir/**/c.o matches dir/c.o)
158
158
159 $ echo "syntax: glob" > .hgignore
159 $ echo "syntax: glob" > .hgignore
160 $ echo "dir/**/c.o" >> .hgignore
160 $ echo "dir/**/c.o" >> .hgignore
161 $ touch dir/c.o
161 $ touch dir/c.o
162 $ mkdir dir/subdir
162 $ mkdir dir/subdir
163 $ touch dir/subdir/c.o
163 $ touch dir/subdir/c.o
164 $ hg status
164 $ hg status
165 A dir/b.o
165 A dir/b.o
166 ? .hgignore
166 ? .hgignore
167 ? a.c
167 ? a.c
168 ? a.o
168 ? a.o
169 ? syntax
169 ? syntax
170
170
171 Check using 'include:' in ignore file
171 Check using 'include:' in ignore file
172
172
173 $ hg purge --all --config extensions.purge=
173 $ hg purge --all --config extensions.purge=
174 $ touch foo.included
174 $ touch foo.included
175
175
176 $ echo ".*.included" > otherignore
176 $ echo ".*.included" > otherignore
177 $ hg status -I "include:otherignore"
177 $ hg status -I "include:otherignore"
178 ? foo.included
178 ? foo.included
179
179
180 $ echo "include:otherignore" >> .hgignore
180 $ echo "include:otherignore" >> .hgignore
181 $ hg status
181 $ hg status
182 A dir/b.o
182 A dir/b.o
183 ? .hgignore
183 ? .hgignore
184 ? otherignore
184 ? otherignore
185
185
186 Check recursive uses of 'include:'
186 Check recursive uses of 'include:'
187
187
188 $ echo "include:nestedignore" >> otherignore
188 $ echo "include:nestedignore" >> otherignore
189 $ echo "glob:*ignore" > nestedignore
189 $ echo "glob:*ignore" > nestedignore
190 $ hg status
190 $ hg status
191 A dir/b.o
191 A dir/b.o
192
193 $ echo "include:badignore" >> otherignore
194 $ hg status
195 skipping unreadable pattern file 'badignore': No such file or directory
196 A dir/b.o
1 NO CONTENT: file was removed
NO CONTENT: file was removed