##// END OF EJS Templates
matcher: make e.g. 'relpath:.' lead to fast paths...
Martin von Zweigbergk -
r24447:d44d53bc default
parent child Browse files
Show More
@@ -1,426 +1,436
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import util, pathutil
9 import util, pathutil
10 from i18n import _
10 from i18n import _
11
11
def _rematcher(regex):
    """Compile ``regex`` with the best available regexp engine and return
    a matcher function (regex string -> match-or-None)."""
    m = util.re.compile(regex)
    try:
        # slightly faster, provided by facebook's re2 bindings
        return m.test_match
    except AttributeError:
        # plain stdlib ``re`` object: fall back to its match method
        return m.match
21
21
22 def _expandsets(kindpats, ctx):
22 def _expandsets(kindpats, ctx):
23 '''Returns the kindpats list with the 'set' patterns expanded.'''
23 '''Returns the kindpats list with the 'set' patterns expanded.'''
24 fset = set()
24 fset = set()
25 other = []
25 other = []
26
26
27 for kind, pat in kindpats:
27 for kind, pat in kindpats:
28 if kind == 'set':
28 if kind == 'set':
29 if not ctx:
29 if not ctx:
30 raise util.Abort("fileset expression with no context")
30 raise util.Abort("fileset expression with no context")
31 s = ctx.getfileset(pat)
31 s = ctx.getfileset(pat)
32 fset.update(s)
32 fset.update(s)
33 continue
33 continue
34 other.append((kind, pat))
34 other.append((kind, pat))
35 return fset, other
35 return fset, other
36
36
37 def _kindpatsalwaysmatch(kindpats):
38 """"Checks whether the kindspats match everything, as e.g.
39 'relpath:.' does.
40 """
41 for kind, pat in kindpats:
42 if pat != '' or kind not in ['relpath', 'glob']:
43 return False
44 return True
45
class match(object):
    def __init__(self, root, cwd, patterns, include=[], exclude=[],
                 default='glob', exact=False, auditor=None, ctx=None):
        """build an object to match a set of file patterns

        arguments:
        root - the canonical root of the tree you're matching against
        cwd - the current working directory, if relevant
        patterns - patterns to find
        include - patterns to include (unless they are excluded)
        exclude - patterns to exclude (even if they are included)
        default - if a pattern in patterns has no explicit type, assume this one
        exact - patterns are actually filenames (include/exclude still apply)

        a pattern is one of:
        'glob:<glob>' - a glob relative to cwd
        're:<regexp>' - a regular expression
        'path:<path>' - a path relative to repository root
        'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
        'relpath:<path>' - a path relative to cwd
        'relre:<regexp>' - a regexp that needn't match the start of a name
        'set:<fileset>' - a fileset expression
        '<something>' - a pattern of the specified default type
        """

        self._root = root
        self._cwd = cwd
        self._files = []  # exact files and roots of patterns
        self._anypats = bool(include or exclude)
        self._ctx = ctx
        self._always = False
        self._pathrestricted = bool(include or exclude or patterns)

        matchfns = []
        if include:
            kindpats = _normalize(include, 'glob', root, cwd, auditor)
            self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)')
            matchfns.append(im)
        if exclude:
            kindpats = _normalize(exclude, 'glob', root, cwd, auditor)
            self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)')
            matchfns.append(lambda f: not em(f))
        if exact:
            if isinstance(patterns, list):
                self._files = patterns
            else:
                self._files = list(patterns)
            matchfns.append(self.exact)
        elif patterns:
            kindpats = _normalize(patterns, default, root, cwd, auditor)
            # always-matching patterns (e.g. 'relpath:.') need no pattern
            # matcher at all, which enables the fast "always" path below
            if not _kindpatsalwaysmatch(kindpats):
                self._files = _roots(kindpats)
                self._anypats = self._anypats or _anypats(kindpats)
                self.patternspat, pm = _buildmatch(ctx, kindpats, '$')
                matchfns.append(pm)

        if not matchfns:
            m = util.always
            self._always = True
        elif len(matchfns) == 1:
            m = matchfns[0]
        else:
            def m(f):
                for matchfn in matchfns:
                    if not matchfn(f):
                        return False
                return True

        self.matchfn = m
        self._fmap = set(self._files)

    def __call__(self, fn):
        return self.matchfn(fn)

    def __iter__(self):
        for f in self._files:
            yield f

    # Callbacks related to how the matcher is used by dirstate.walk.
    # Subscribers to these events must monkeypatch the matcher object.
    def bad(self, f, msg):
        '''Callback from dirstate.walk for each explicit file that can't be
        found/accessed, with an error message.'''
        pass

    # If an explicitdir is set, it will be called when an explicitly listed
    # directory is visited.
    explicitdir = None

    # If a traversedir is set, it will be called when a directory discovered
    # by recursive traversal is visited.
    traversedir = None

    def abs(self, f):
        '''Convert a repo path back to path that is relative to the root of the
        matcher.'''
        return f

    def rel(self, f):
        '''Convert repo path back to path that is relative to cwd of matcher.'''
        return util.pathto(self._root, self._cwd, f)

    def uipath(self, f):
        '''Convert repo path to a display path.  If patterns or -I/-X were used
        to create this matcher, the display path will be relative to cwd.
        Otherwise it is relative to the root of the repo.'''
        return (self._pathrestricted and self.rel(f)) or self.abs(f)

    def files(self):
        '''Explicitly listed files or patterns or roots:
        if no patterns or .always(): empty list,
        if exact: list exact files,
        if not .anypats(): list all files and dirs,
        else: optimal roots'''
        return self._files

    def exact(self, f):
        '''Returns True if f is in .files().'''
        return f in self._fmap

    def anypats(self):
        '''Matcher uses patterns or include/exclude.'''
        return self._anypats

    def always(self):
        '''Matcher will match everything and .files() will be empty
        - optimization might be possible and necessary.'''
        return self._always
163
173
def exact(root, cwd, files):
    """Convenience constructor: a matcher that matches exactly ``files``."""
    return match(root, cwd, files, exact=True)
166
176
def always(root, cwd):
    """Convenience constructor: a matcher that matches everything."""
    return match(root, cwd, [])
169
179
class narrowmatcher(match):
    """Adapt a matcher to work on a subdirectory only.

    The paths are remapped to remove/insert the path as needed:

    >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
    >>> m2 = narrowmatcher('sub', m1)
    >>> bool(m2('a.txt'))
    False
    >>> bool(m2('b.txt'))
    True
    >>> bool(m2.matchfn('a.txt'))
    False
    >>> bool(m2.matchfn('b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact('b.txt')
    True
    >>> util.pconvert(m2.rel('b.txt'))
    'sub/b.txt'
    >>> def bad(f, msg):
    ...     print "%s: %s" % (f, msg)
    >>> m1.bad = bad
    >>> m2.bad('x.txt', 'No such file')
    sub/x.txt: No such file
    >>> m2.abs('c.txt')
    'sub/c.txt'
    """

    def __init__(self, path, matcher):
        # note: base-class __init__ is deliberately not called; all the
        # state it would set up is derived from the wrapped matcher instead
        self._root = matcher._root
        self._cwd = matcher._cwd
        self._path = path
        self._matcher = matcher
        self._always = matcher._always
        self._pathrestricted = matcher._pathrestricted

        # keep only files under ``path`` and strip the "path/" prefix
        self._files = [f[len(path) + 1:] for f in matcher._files
                       if f.startswith(path + "/")]
        self._anypats = matcher._anypats
        self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
        self._fmap = set(self._files)

    def abs(self, f):
        return self._matcher.abs(self._path + "/" + f)

    def bad(self, f, msg):
        self._matcher.bad(self._path + "/" + f, msg)

    def rel(self, f):
        return self._matcher.rel(self._path + "/" + f)
222
232
def patkind(pattern, default=None):
    '''If pattern is 'kind:pat' with a known kind, return kind.'''
    kind, _pat = _patsplit(pattern, default)
    return kind
226
236
227 def _patsplit(pattern, default):
237 def _patsplit(pattern, default):
228 """Split a string into the optional pattern kind prefix and the actual
238 """Split a string into the optional pattern kind prefix and the actual
229 pattern."""
239 pattern."""
230 if ':' in pattern:
240 if ':' in pattern:
231 kind, pat = pattern.split(':', 1)
241 kind, pat = pattern.split(':', 1)
232 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
242 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
233 'listfile', 'listfile0', 'set'):
243 'listfile', 'listfile0', 'set'):
234 return kind, pat
244 return kind, pat
235 return default, pattern
245 return default, pattern
236
246
def _globre(pat):
    r'''Convert an extended glob string to a regexp string.

    >>> print _globre(r'?')
    .
    >>> print _globre(r'*')
    [^/]*
    >>> print _globre(r'**')
    .*
    >>> print _globre(r'**/a')
    (?:.*/)?a
    >>> print _globre(r'a/**/b')
    a\/(?:.*/)?b
    >>> print _globre(r'[a*?!^][^b][!c]')
    [a*?!^][\^b][^c]
    >>> print _globre(r'{a,b}')
    (?:a|b)
    >>> print _globre(r'.\*\?')
    \.\*\?
    '''
    i, n = 0, len(pat)
    res = ''
    group = 0  # depth of open '{...}' alternation groups
    escape = util.re.escape
    def peek():
        # next character, or False at end of input (reads i from closure)
        return i < n and pat[i]
    while i < n:
        c = pat[i]
        i += 1
        if c not in '*?[{},\\':
            res += escape(c)
        elif c == '*':
            if peek() == '*':
                i += 1
                if peek() == '/':
                    # '**/' matches any (possibly empty) directory prefix
                    i += 1
                    res += '(?:.*/)?'
                else:
                    res += '.*'
            else:
                # single '*' never crosses a path separator
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # scan ahead for the matching ']'; a leading '!' or ']' is
            # part of the class, not its terminator
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' literally
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation -> regex negation
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # literal '^' must be escaped inside the class
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group += 1
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group -= 1
        elif c == ',' and group:
            res += '|'
        elif c == '\\':
            p = peek()
            if p:
                i += 1
                res += escape(p)
            else:
                # trailing backslash: keep it, escaped
                res += escape(c)
        else:
            # ',' or '}' outside a group: literal
            res += escape(c)
    return res
314
324
315 def _regex(kind, pat, globsuffix):
325 def _regex(kind, pat, globsuffix):
316 '''Convert a (normalized) pattern of any kind into a regular expression.
326 '''Convert a (normalized) pattern of any kind into a regular expression.
317 globsuffix is appended to the regexp of globs.'''
327 globsuffix is appended to the regexp of globs.'''
318 if not pat:
328 if not pat:
319 return ''
329 return ''
320 if kind == 're':
330 if kind == 're':
321 return pat
331 return pat
322 if kind == 'path':
332 if kind == 'path':
323 return '^' + util.re.escape(pat) + '(?:/|$)'
333 return '^' + util.re.escape(pat) + '(?:/|$)'
324 if kind == 'relglob':
334 if kind == 'relglob':
325 return '(?:|.*/)' + _globre(pat) + globsuffix
335 return '(?:|.*/)' + _globre(pat) + globsuffix
326 if kind == 'relpath':
336 if kind == 'relpath':
327 return util.re.escape(pat) + '(?:/|$)'
337 return util.re.escape(pat) + '(?:/|$)'
328 if kind == 'relre':
338 if kind == 'relre':
329 if pat.startswith('^'):
339 if pat.startswith('^'):
330 return pat
340 return pat
331 return '.*' + pat
341 return '.*' + pat
332 return _globre(pat) + globsuffix
342 return _globre(pat) + globsuffix
333
343
def _buildmatch(ctx, kindpats, globsuffix):
    '''Return regexp string and a matcher function for kindpats.
    globsuffix is appended to the regexp of globs.'''
    fset, kindpats = _expandsets(kindpats, ctx)
    if not kindpats:
        # only fileset results remain: plain membership test, no regex
        return "", fset.__contains__

    regex, mf = _buildregexmatch(kindpats, globsuffix)
    if not fset:
        return regex, mf
    # combine: a file matches if the filesets or the patterns accept it
    return regex, lambda f: f in fset or mf(f)
345
355
def _buildregexmatch(kindpats, globsuffix):
    """Build a match function from a list of kinds and kindpats,
    return regexp string and a matcher function."""
    try:
        regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
                                     for (k, p) in kindpats])
        if len(regex) > 20000:
            raise OverflowError
        return regex, _rematcher(regex)
    except OverflowError:
        # We're using a Python with a tiny regex engine and we
        # made it explode, so we'll divide the pattern list in two
        # until it works
        # (``regex`` is bound here: the assignment above completes before
        # either the explicit raise or the engine overflow in _rematcher)
        l = len(kindpats)
        if l < 2:
            raise
        regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
        regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
        return regex, lambda s: a(s) or b(s)
    except re.error:
        # recompile each pattern alone to pinpoint the offender
        for k, p in kindpats:
            try:
                _rematcher('(?:%s)' % _regex(k, p, globsuffix))
            except re.error:
                raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise util.Abort(_("invalid pattern"))
372
382
def _normalize(patterns, default, root, cwd, auditor):
    '''Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded.'''
    kindpats = []
    for kind, pat in [_patsplit(p, default) for p in patterns]:
        if kind in ('glob', 'relpath'):
            # cwd-relative kinds are rooted against the repo
            pat = pathutil.canonpath(root, cwd, pat, auditor)
        elif kind in ('relglob', 'path'):
            pat = util.normpath(pat)
        elif kind in ('listfile', 'listfile0'):
            # the pattern names a file containing one pattern per line
            # (NUL-separated for listfile0); expand it recursively
            try:
                files = util.readfile(pat)
                if kind == 'listfile0':
                    files = files.split('\0')
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % pat)
            kindpats += _normalize(files, default, root, cwd, auditor)
            continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat))
    return kindpats
397
407
398 def _roots(kindpats):
408 def _roots(kindpats):
399 '''return roots and exact explicitly listed files from patterns
409 '''return roots and exact explicitly listed files from patterns
400
410
401 >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
411 >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
402 ['g', 'g', '.']
412 ['g', 'g', '.']
403 >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
413 >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
404 ['r', 'p/p', '.']
414 ['r', 'p/p', '.']
405 >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
415 >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
406 ['.', '.', '.']
416 ['.', '.', '.']
407 '''
417 '''
408 r = []
418 r = []
409 for kind, pat in kindpats:
419 for kind, pat in kindpats:
410 if kind == 'glob': # find the non-glob prefix
420 if kind == 'glob': # find the non-glob prefix
411 root = []
421 root = []
412 for p in pat.split('/'):
422 for p in pat.split('/'):
413 if '[' in p or '{' in p or '*' in p or '?' in p:
423 if '[' in p or '{' in p or '*' in p or '?' in p:
414 break
424 break
415 root.append(p)
425 root.append(p)
416 r.append('/'.join(root) or '.')
426 r.append('/'.join(root) or '.')
417 elif kind in ('relpath', 'path'):
427 elif kind in ('relpath', 'path'):
418 r.append(pat or '.')
428 r.append(pat or '.')
419 else: # relglob, re, relre
429 else: # relglob, re, relre
420 r.append('.')
430 r.append('.')
421 return r
431 return r
422
432
423 def _anypats(kindpats):
433 def _anypats(kindpats):
424 for kind, pat in kindpats:
434 for kind, pat in kindpats:
425 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
435 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
426 return True
436 return True
@@ -1,1121 +1,1123
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases, parsers
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile
13 import os, errno, re, glob, tempfile
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
75
75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subrepo path to the context to read it from, preferring
    # ctx1. Paths only in ctx2 matter when the .hgsub file has been
    # modified (in ctx2) but not yet committed (in ctx1).
    mapping = {}
    for path in ctx2.substate:
        mapping[path] = ctx2
    for path in ctx1.substate:
        mapping[path] = ctx1
    for subpath in sorted(mapping):
        yield subpath, mapping[subpath].sub(subpath)
85
85
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
106
106
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not a valid new label (bookmark/branch/tag) name.

    Do not use the "kind" parameter in ui output; it makes strings
    difficult to translate.
    '''
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the name parsed as an integer and would be ambiguous with a rev
        raise util.Abort(_("cannot use an integer as a name"))
120
120
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\n' in f or '\r' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125
125
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
137
137
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans.
    '''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # on Windows non-portable names must always abort; elsewhere only when
    # explicitly configured
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        # the value parsed neither as a boolean nor as a known keyword
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
150
150
class casecollisionauditor(object):
    '''Warn or abort when a new filename case-folds to a tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # _newfiles remembers names already checked so that auditing the
        # same filename twice does not complain twice
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            # already vetted during this audit; nothing new to report
            return
        folded = encoding.lower(f)
        collides = (folded in self._loweredfiles
                    and f not in self._dirstate)
        if collides:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
174
174
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            # only a missing file is tolerated; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
            return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            # only a missing file is tolerated; other I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
            return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind 'open' to __call__ on first use so subsequent calls skip
        # this method entirely
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # read the whole file as bytes, always closing the file object
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        # open in append mode so any existing content is preserved
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The following are thin wrappers applying the same operation to the
    # path joined with the vfs base (via self.join).

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            # report the temporary file path relative to the vfs root,
            # not the absolute path tempfile produced
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)
333
333
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            # audit every accessed path against the vfs base
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # computed once per instance: does the filesystem support symlinks?
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # computed once per instance: does the filesystem honor exec bits?
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply the configured creation mode to a newly created file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the file's hardlink count: -1 unknown, 0 means the
        # file is (or will be) freshly created
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks before modifying the file in
                        # place, so other links keep the old content
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        # remove any pre-existing link target; a missing file is fine
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # symlinks unsupported: fall back to a regular file that
            # contains the link target
            self.write(dst, src)

    def join(self, path):
        # an empty/None path refers to the vfs base itself
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
453
453
# backwards compatible alias for the vfs class
opener = vfs
455
455
class auditvfs(object):
    '''Mixin delegating the mustaudit flag to a wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        # forward the query to the wrapped vfs
        inner = self.vfs
        return inner.mustaudit

    def _setmustaudit(self, onoff):
        # toggling audit on the wrapper toggles it on the wrapped vfs
        inner = self.vfs
        inner.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
467
467
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # translate the filename before delegating to the wrapped vfs
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if not path:
            # empty/None paths bypass the filter, as in vfs.join
            return self.vfs.join(path)
        return self.vfs.join(self._filter(path))
483
483
# backwards compatible alias for the filtervfs class
filteropener = filtervfs
485
485
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
496
496
497
497
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # remember the stat of every visited directory so that symlink
        # cycles can be broken; adddir returns False for an already-seen
        # directory
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat cycles cannot be detected, so never follow
        # symlinks in that case
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink with a fresh recursive
                        # call so its subtree is scanned too
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
545
545
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # bundled default.d/*.rc files come first, then system, then user rcs
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
558
558
# module-level cache for rcpath(); computed once per process
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                # empty entries are skipped; an HGRCPATH of "" thus
                # yields an empty search path
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
584
584
def revsingle(repo, revspec, default='.'):
    '''Return the context for the single revision described by revspec,
    or repo[default] when revspec is empty.

    Raises util.Abort when revspec resolves to an empty set.'''
    # revision 0 is falsy but valid, hence the explicit "!= 0" check
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    # if the spec describes several revisions, the last one wins
    return repo[l.last()]
593
593
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (first, second) node pair.

    With no revs, returns (working dir parent, None). When the specs
    resolve to a single revision that was not written as a range, the
    second element is None. Raises util.Abort on an empty range.'''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints without forcing full iteration when the set
    # is known to be sorted in either direction
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single spec without the range separator means "one revision",
    # even when first == second
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

# separator of old-style range specs ("a:b"); defined after revpair but
# only read at call time, so the forward reference is fine
_revrangesep = ':'
621
621
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # map an empty endpoint ("" in ":5") to its default; 0 is a
        # real revision, hence the explicit "!= 0" check
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), revset.baseset([])

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # sync 'seen' lazily: the common single-spec case never needs it
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo)

    return l
695
695
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        # only kind-less patterns are globbed; 'glob:foo*' etc. are
        # handled later by the matcher itself
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                # invalid glob metacharacters: treat as a literal name
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # no match (or explicit kind): keep the pattern verbatim
        ret.append(kindpat)
    return ret
714
714
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        # the matcher matches everything (e.g. 'relpath:.'), so report
        # no patterns and let callers take their match-all fast paths
        pats = []
    return m, pats
729
731
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats, discarding the normalized pattern list
    m, _pats = matchandpats(ctx, pats, opts, globbed, default)
    return m
733
735
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
737
739
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
741
743
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add new files and remove missing ones matched by 'matcher', also
    recording renames detected by content similarity. Returns 1 if any
    explicitly named file was rejected (or a subrepo reported failure),
    0 otherwise.'''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # NOTE(review): plain startswith also matches sibling paths that
        # merely share the prefix (e.g. 'sub2' for 'sub') -- confirm
        # whether a '/' boundary check is intended here.
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    # recurse into subrepos first, either when --subrepos was given or
    # when the match explicitly names something inside the subrepo
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # temporarily wrap m.bad so we can both collect rejected files and
    # keep warning about explicitly named ones; restored right after the
    # walk, so ordering of these three statements matters
    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # exact matches are silent unless --verbose: the user already
        # named the file explicitly
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # rejection of an explicitly named file trumps any earlier result
    for f in rejected:
        if f in m.files():
            return 1
    return ret
804
806
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Like addremove, but restricted to an explicit file list and quiet by
    default. Returns 1 when any of the given files was rejected by the
    dirstate walk, 0 otherwise.'''
    m = matchfiles(repo, files)
    rejected = []
    # silently collect bad files; unlike addremove we never warn here
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    # no dry-run mode here: changes are always recorded
    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
834
836
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of repo-root
    relative paths, classified from each file's dirstate code and whether it
    exists on disk (st).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        # '?' = untracked; only report it when the path passes the audit
        # (not inside a nested repo, no symlink traversal, ...)
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing on disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed yet present again on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
863
865
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name. A similarity of 0
    disables detection entirely.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            # stay quiet about pairs the user named explicitly unless
            # --verbose is in effect
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
878
880
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-dir lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
891
893
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain: if src itself is a copy, attribute
    # the new copy to its original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only just added; there is no committed
            # revision to record copy data against
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
910
912
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings. Raises RequirementError when
    the file is corrupt or lists features this Mercurial lacks.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            # a requirement name must start with an alphanumeric; anything
            # else (including an empty line) indicates file corruption
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
929
931
class filecachesubentry(object):
    '''Stat-based change tracker for a single path used by filecache.

    _cacheable is a tristate: True/False once known, None while we have
    not yet been able to stat the file.'''

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # record the current stat info as the new baseline
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        '''True when the file may have changed since the last refresh;
        uncacheable files always report changed.'''
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist; any other OS error
        # propagates
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
984
986
class filecacheentry(object):
    '''Aggregate of filecachesubentry trackers, one per watched path.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1001
1003
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # obj.__dict__ acts as a per-access fast path: once set, this
        # descriptor is bypassed until __delete__ or invalidation
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1077
1079
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # maps directory name -> number of tracked files beneath it
        self._dirs = {}
        addpath = self.addpath
        # dirstate-style input: (f, state) items, optionally skipping one
        # state code; otherwise treat map as a plain iterable of names
        if util.safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            # once an ancestor is already counted, all of its own
            # ancestors are too, so we can stop early
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            # mirror of addpath: stop at the first ancestor that still
            # has other files under it
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
1113
1115
# prefer the C implementation of dirs when the parsers extension has one
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
1116
1118
def finddirs(path):
    '''Yield the ancestor directories of path, deepest first:
    'a/b/c' -> 'a/b', 'a'. The empty root is never yielded.'''
    sep = path.rfind('/')
    while sep >= 0:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
General Comments 0
You need to be logged in to leave comments. Login now