##// END OF EJS Templates
matcher: make e.g. 'relpath:.' lead to fast paths...
Martin von Zweigbergk -
r24447:d44d53bc default
parent child Browse files
Show More
@@ -1,426 +1,436
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import util, pathutil
10 10 from i18n import _
11 11
12 12 def _rematcher(regex):
13 13 '''compile the regexp with the best available regexp engine and return a
14 14 matcher function'''
15 15 m = util.re.compile(regex)
16 16 try:
17 17 # slightly faster, provided by facebook's re2 bindings
18 18 return m.test_match
19 19 except AttributeError:
20 20 return m.match
21 21
22 22 def _expandsets(kindpats, ctx):
23 23 '''Returns the kindpats list with the 'set' patterns expanded.'''
24 24 fset = set()
25 25 other = []
26 26
27 27 for kind, pat in kindpats:
28 28 if kind == 'set':
29 29 if not ctx:
30 30 raise util.Abort("fileset expression with no context")
31 31 s = ctx.getfileset(pat)
32 32 fset.update(s)
33 33 continue
34 34 other.append((kind, pat))
35 35 return fset, other
36 36
37 def _kindpatsalwaysmatch(kindpats):
38 """"Checks whether the kindspats match everything, as e.g.
39 'relpath:.' does.
40 """
41 for kind, pat in kindpats:
42 if pat != '' or kind not in ['relpath', 'glob']:
43 return False
44 return True
45
37 46 class match(object):
38 47 def __init__(self, root, cwd, patterns, include=[], exclude=[],
39 48 default='glob', exact=False, auditor=None, ctx=None):
40 49 """build an object to match a set of file patterns
41 50
42 51 arguments:
43 52 root - the canonical root of the tree you're matching against
44 53 cwd - the current working directory, if relevant
45 54 patterns - patterns to find
46 55 include - patterns to include (unless they are excluded)
47 56 exclude - patterns to exclude (even if they are included)
48 57 default - if a pattern in patterns has no explicit type, assume this one
49 58 exact - patterns are actually filenames (include/exclude still apply)
50 59
51 60 a pattern is one of:
52 61 'glob:<glob>' - a glob relative to cwd
53 62 're:<regexp>' - a regular expression
54 63 'path:<path>' - a path relative to repository root
55 64 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
56 65 'relpath:<path>' - a path relative to cwd
57 66 'relre:<regexp>' - a regexp that needn't match the start of a name
58 67 'set:<fileset>' - a fileset expression
59 68 '<something>' - a pattern of the specified default type
60 69 """
61 70
62 71 self._root = root
63 72 self._cwd = cwd
64 73 self._files = [] # exact files and roots of patterns
65 74 self._anypats = bool(include or exclude)
66 75 self._ctx = ctx
67 76 self._always = False
68 77 self._pathrestricted = bool(include or exclude or patterns)
69 78
70 79 matchfns = []
71 80 if include:
72 81 kindpats = _normalize(include, 'glob', root, cwd, auditor)
73 82 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)')
74 83 matchfns.append(im)
75 84 if exclude:
76 85 kindpats = _normalize(exclude, 'glob', root, cwd, auditor)
77 86 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)')
78 87 matchfns.append(lambda f: not em(f))
79 88 if exact:
80 89 if isinstance(patterns, list):
81 90 self._files = patterns
82 91 else:
83 92 self._files = list(patterns)
84 93 matchfns.append(self.exact)
85 94 elif patterns:
86 95 kindpats = _normalize(patterns, default, root, cwd, auditor)
87 self._files = _roots(kindpats)
88 self._anypats = self._anypats or _anypats(kindpats)
89 self.patternspat, pm = _buildmatch(ctx, kindpats, '$')
90 matchfns.append(pm)
96 if not _kindpatsalwaysmatch(kindpats):
97 self._files = _roots(kindpats)
98 self._anypats = self._anypats or _anypats(kindpats)
99 self.patternspat, pm = _buildmatch(ctx, kindpats, '$')
100 matchfns.append(pm)
91 101
92 102 if not matchfns:
93 103 m = util.always
94 104 self._always = True
95 105 elif len(matchfns) == 1:
96 106 m = matchfns[0]
97 107 else:
98 108 def m(f):
99 109 for matchfn in matchfns:
100 110 if not matchfn(f):
101 111 return False
102 112 return True
103 113
104 114 self.matchfn = m
105 115 self._fmap = set(self._files)
106 116
107 117 def __call__(self, fn):
108 118 return self.matchfn(fn)
109 119 def __iter__(self):
110 120 for f in self._files:
111 121 yield f
112 122
113 123 # Callbacks related to how the matcher is used by dirstate.walk.
114 124 # Subscribers to these events must monkeypatch the matcher object.
115 125 def bad(self, f, msg):
116 126 '''Callback from dirstate.walk for each explicit file that can't be
117 127 found/accessed, with an error message.'''
118 128 pass
119 129
120 130 # If an explicitdir is set, it will be called when an explicitly listed
121 131 # directory is visited.
122 132 explicitdir = None
123 133
124 134 # If an traversedir is set, it will be called when a directory discovered
125 135 # by recursive traversal is visited.
126 136 traversedir = None
127 137
128 138 def abs(self, f):
129 139 '''Convert a repo path back to path that is relative to the root of the
130 140 matcher.'''
131 141 return f
132 142
133 143 def rel(self, f):
134 144 '''Convert repo path back to path that is relative to cwd of matcher.'''
135 145 return util.pathto(self._root, self._cwd, f)
136 146
137 147 def uipath(self, f):
138 148 '''Convert repo path to a display path. If patterns or -I/-X were used
139 149 to create this matcher, the display path will be relative to cwd.
140 150 Otherwise it is relative to the root of the repo.'''
141 151 return (self._pathrestricted and self.rel(f)) or self.abs(f)
142 152
143 153 def files(self):
144 154 '''Explicitly listed files or patterns or roots:
145 155 if no patterns or .always(): empty list,
146 156 if exact: list exact files,
147 157 if not .anypats(): list all files and dirs,
148 158 else: optimal roots'''
149 159 return self._files
150 160
151 161 def exact(self, f):
152 162 '''Returns True if f is in .files().'''
153 163 return f in self._fmap
154 164
155 165 def anypats(self):
156 166 '''Matcher uses patterns or include/exclude.'''
157 167 return self._anypats
158 168
159 169 def always(self):
160 170 '''Matcher will match everything and .files() will be empty
161 171 - optimization might be possible and necessary.'''
162 172 return self._always
163 173
164 174 def exact(root, cwd, files):
165 175 return match(root, cwd, files, exact=True)
166 176
167 177 def always(root, cwd):
168 178 return match(root, cwd, [])
169 179
170 180 class narrowmatcher(match):
171 181 """Adapt a matcher to work on a subdirectory only.
172 182
173 183 The paths are remapped to remove/insert the path as needed:
174 184
175 185 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
176 186 >>> m2 = narrowmatcher('sub', m1)
177 187 >>> bool(m2('a.txt'))
178 188 False
179 189 >>> bool(m2('b.txt'))
180 190 True
181 191 >>> bool(m2.matchfn('a.txt'))
182 192 False
183 193 >>> bool(m2.matchfn('b.txt'))
184 194 True
185 195 >>> m2.files()
186 196 ['b.txt']
187 197 >>> m2.exact('b.txt')
188 198 True
189 199 >>> util.pconvert(m2.rel('b.txt'))
190 200 'sub/b.txt'
191 201 >>> def bad(f, msg):
192 202 ... print "%s: %s" % (f, msg)
193 203 >>> m1.bad = bad
194 204 >>> m2.bad('x.txt', 'No such file')
195 205 sub/x.txt: No such file
196 206 >>> m2.abs('c.txt')
197 207 'sub/c.txt'
198 208 """
199 209
200 210 def __init__(self, path, matcher):
201 211 self._root = matcher._root
202 212 self._cwd = matcher._cwd
203 213 self._path = path
204 214 self._matcher = matcher
205 215 self._always = matcher._always
206 216 self._pathrestricted = matcher._pathrestricted
207 217
208 218 self._files = [f[len(path) + 1:] for f in matcher._files
209 219 if f.startswith(path + "/")]
210 220 self._anypats = matcher._anypats
211 221 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
212 222 self._fmap = set(self._files)
213 223
214 224 def abs(self, f):
215 225 return self._matcher.abs(self._path + "/" + f)
216 226
217 227 def bad(self, f, msg):
218 228 self._matcher.bad(self._path + "/" + f, msg)
219 229
220 230 def rel(self, f):
221 231 return self._matcher.rel(self._path + "/" + f)
222 232
223 233 def patkind(pattern, default=None):
224 234 '''If pattern is 'kind:pat' with a known kind, return kind.'''
225 235 return _patsplit(pattern, default)[0]
226 236
227 237 def _patsplit(pattern, default):
228 238 """Split a string into the optional pattern kind prefix and the actual
229 239 pattern."""
230 240 if ':' in pattern:
231 241 kind, pat = pattern.split(':', 1)
232 242 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
233 243 'listfile', 'listfile0', 'set'):
234 244 return kind, pat
235 245 return default, pattern
236 246
237 247 def _globre(pat):
238 248 r'''Convert an extended glob string to a regexp string.
239 249
240 250 >>> print _globre(r'?')
241 251 .
242 252 >>> print _globre(r'*')
243 253 [^/]*
244 254 >>> print _globre(r'**')
245 255 .*
246 256 >>> print _globre(r'**/a')
247 257 (?:.*/)?a
248 258 >>> print _globre(r'a/**/b')
249 259 a\/(?:.*/)?b
250 260 >>> print _globre(r'[a*?!^][^b][!c]')
251 261 [a*?!^][\^b][^c]
252 262 >>> print _globre(r'{a,b}')
253 263 (?:a|b)
254 264 >>> print _globre(r'.\*\?')
255 265 \.\*\?
256 266 '''
257 267 i, n = 0, len(pat)
258 268 res = ''
259 269 group = 0
260 270 escape = util.re.escape
261 271 def peek():
262 272 return i < n and pat[i]
263 273 while i < n:
264 274 c = pat[i]
265 275 i += 1
266 276 if c not in '*?[{},\\':
267 277 res += escape(c)
268 278 elif c == '*':
269 279 if peek() == '*':
270 280 i += 1
271 281 if peek() == '/':
272 282 i += 1
273 283 res += '(?:.*/)?'
274 284 else:
275 285 res += '.*'
276 286 else:
277 287 res += '[^/]*'
278 288 elif c == '?':
279 289 res += '.'
280 290 elif c == '[':
281 291 j = i
282 292 if j < n and pat[j] in '!]':
283 293 j += 1
284 294 while j < n and pat[j] != ']':
285 295 j += 1
286 296 if j >= n:
287 297 res += '\\['
288 298 else:
289 299 stuff = pat[i:j].replace('\\','\\\\')
290 300 i = j + 1
291 301 if stuff[0] == '!':
292 302 stuff = '^' + stuff[1:]
293 303 elif stuff[0] == '^':
294 304 stuff = '\\' + stuff
295 305 res = '%s[%s]' % (res, stuff)
296 306 elif c == '{':
297 307 group += 1
298 308 res += '(?:'
299 309 elif c == '}' and group:
300 310 res += ')'
301 311 group -= 1
302 312 elif c == ',' and group:
303 313 res += '|'
304 314 elif c == '\\':
305 315 p = peek()
306 316 if p:
307 317 i += 1
308 318 res += escape(p)
309 319 else:
310 320 res += escape(c)
311 321 else:
312 322 res += escape(c)
313 323 return res
314 324
315 325 def _regex(kind, pat, globsuffix):
316 326 '''Convert a (normalized) pattern of any kind into a regular expression.
317 327 globsuffix is appended to the regexp of globs.'''
318 328 if not pat:
319 329 return ''
320 330 if kind == 're':
321 331 return pat
322 332 if kind == 'path':
323 333 return '^' + util.re.escape(pat) + '(?:/|$)'
324 334 if kind == 'relglob':
325 335 return '(?:|.*/)' + _globre(pat) + globsuffix
326 336 if kind == 'relpath':
327 337 return util.re.escape(pat) + '(?:/|$)'
328 338 if kind == 'relre':
329 339 if pat.startswith('^'):
330 340 return pat
331 341 return '.*' + pat
332 342 return _globre(pat) + globsuffix
333 343
334 344 def _buildmatch(ctx, kindpats, globsuffix):
335 345 '''Return regexp string and a matcher function for kindpats.
336 346 globsuffix is appended to the regexp of globs.'''
337 347 fset, kindpats = _expandsets(kindpats, ctx)
338 348 if not kindpats:
339 349 return "", fset.__contains__
340 350
341 351 regex, mf = _buildregexmatch(kindpats, globsuffix)
342 352 if fset:
343 353 return regex, lambda f: f in fset or mf(f)
344 354 return regex, mf
345 355
346 356 def _buildregexmatch(kindpats, globsuffix):
347 357 """Build a match function from a list of kinds and kindpats,
348 358 return regexp string and a matcher function."""
349 359 try:
350 360 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
351 361 for (k, p) in kindpats])
352 362 if len(regex) > 20000:
353 363 raise OverflowError
354 364 return regex, _rematcher(regex)
355 365 except OverflowError:
356 366 # We're using a Python with a tiny regex engine and we
357 367 # made it explode, so we'll divide the pattern list in two
358 368 # until it works
359 369 l = len(kindpats)
360 370 if l < 2:
361 371 raise
362 372 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
363 373 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
364 374 return regex, lambda s: a(s) or b(s)
365 375 except re.error:
366 376 for k, p in kindpats:
367 377 try:
368 378 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
369 379 except re.error:
370 380 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
371 381 raise util.Abort(_("invalid pattern"))
372 382
373 383 def _normalize(patterns, default, root, cwd, auditor):
374 384 '''Convert 'kind:pat' from the patterns list to tuples with kind and
375 385 normalized and rooted patterns and with listfiles expanded.'''
376 386 kindpats = []
377 387 for kind, pat in [_patsplit(p, default) for p in patterns]:
378 388 if kind in ('glob', 'relpath'):
379 389 pat = pathutil.canonpath(root, cwd, pat, auditor)
380 390 elif kind in ('relglob', 'path'):
381 391 pat = util.normpath(pat)
382 392 elif kind in ('listfile', 'listfile0'):
383 393 try:
384 394 files = util.readfile(pat)
385 395 if kind == 'listfile0':
386 396 files = files.split('\0')
387 397 else:
388 398 files = files.splitlines()
389 399 files = [f for f in files if f]
390 400 except EnvironmentError:
391 401 raise util.Abort(_("unable to read file list (%s)") % pat)
392 402 kindpats += _normalize(files, default, root, cwd, auditor)
393 403 continue
394 404 # else: re or relre - which cannot be normalized
395 405 kindpats.append((kind, pat))
396 406 return kindpats
397 407
398 408 def _roots(kindpats):
399 409 '''return roots and exact explicitly listed files from patterns
400 410
401 411 >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
402 412 ['g', 'g', '.']
403 413 >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
404 414 ['r', 'p/p', '.']
405 415 >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
406 416 ['.', '.', '.']
407 417 '''
408 418 r = []
409 419 for kind, pat in kindpats:
410 420 if kind == 'glob': # find the non-glob prefix
411 421 root = []
412 422 for p in pat.split('/'):
413 423 if '[' in p or '{' in p or '*' in p or '?' in p:
414 424 break
415 425 root.append(p)
416 426 r.append('/'.join(root) or '.')
417 427 elif kind in ('relpath', 'path'):
418 428 r.append(pat or '.')
419 429 else: # relglob, re, relre
420 430 r.append('.')
421 431 return r
422 432
423 433 def _anypats(kindpats):
424 434 for kind, pat in kindpats:
425 435 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
426 436 return True
@@ -1,1121 +1,1123
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83 for subpath, ctx in sorted(subpaths.iteritems()):
84 84 yield subpath, ctx.sub(subpath)
85 85
86 86 def nochangesfound(ui, repo, excluded=None):
87 87 '''Report no changes for push/pull, excluded is None or a list of
88 88 nodes excluded from the push/pull.
89 89 '''
90 90 secretlist = []
91 91 if excluded:
92 92 for n in excluded:
93 93 if n not in repo:
94 94 # discovery should not have included the filtered revision,
95 95 # we have to explicitly exclude it until discovery is cleanup.
96 96 continue
97 97 ctx = repo[n]
98 98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 99 secretlist.append(n)
100 100
101 101 if secretlist:
102 102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 103 % len(secretlist))
104 104 else:
105 105 ui.status(_("no changes found\n"))
106 106
107 107 def checknewlabel(repo, lbl, kind):
108 108 # Do not use the "kind" parameter in ui output.
109 109 # It makes strings difficult to translate.
110 110 if lbl in ['tip', '.', 'null']:
111 111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 112 for c in (':', '\0', '\n', '\r'):
113 113 if c in lbl:
114 114 raise util.Abort(_("%r cannot be used in a name") % c)
115 115 try:
116 116 int(lbl)
117 117 raise util.Abort(_("cannot use an integer as a name"))
118 118 except ValueError:
119 119 pass
120 120
121 121 def checkfilename(f):
122 122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 123 if '\r' in f or '\n' in f:
124 124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
126 126 def checkportable(ui, f):
127 127 '''Check if filename f is portable and warn or abort depending on config'''
128 128 checkfilename(f)
129 129 abort, warn = checkportabilityalert(ui)
130 130 if abort or warn:
131 131 msg = util.checkwinfilename(f)
132 132 if msg:
133 133 msg = "%s: %r" % (msg, f)
134 134 if abort:
135 135 raise util.Abort(msg)
136 136 ui.warn(_("warning: %s\n") % msg)
137 137
138 138 def checkportabilityalert(ui):
139 139 '''check if the user's config requests nothing, a warning, or abort for
140 140 non-portable filenames'''
141 141 val = ui.config('ui', 'portablefilenames', 'warn')
142 142 lval = val.lower()
143 143 bval = util.parsebool(val)
144 144 abort = os.name == 'nt' or lval == 'abort'
145 145 warn = bval or lval == 'warn'
146 146 if bval is None and not (warn or abort or lval == 'ignore'):
147 147 raise error.ConfigError(
148 148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 149 return abort, warn
150 150
151 151 class casecollisionauditor(object):
152 152 def __init__(self, ui, abort, dirstate):
153 153 self._ui = ui
154 154 self._abort = abort
155 155 allfiles = '\0'.join(dirstate._map)
156 156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 157 self._dirstate = dirstate
158 158 # The purpose of _newfiles is so that we don't complain about
159 159 # case collisions if someone were to call this object with the
160 160 # same filename twice.
161 161 self._newfiles = set()
162 162
163 163 def __call__(self, f):
164 164 if f in self._newfiles:
165 165 return
166 166 fl = encoding.lower(f)
167 167 if fl in self._loweredfiles and f not in self._dirstate:
168 168 msg = _('possible case-folding collision for %s') % f
169 169 if self._abort:
170 170 raise util.Abort(msg)
171 171 self._ui.warn(_("warning: %s\n") % msg)
172 172 self._loweredfiles.add(fl)
173 173 self._newfiles.add(f)
174 174
175 175 class abstractvfs(object):
176 176 """Abstract base class; cannot be instantiated"""
177 177
178 178 def __init__(self, *args, **kwargs):
179 179 '''Prevent instantiation; don't call this from subclasses.'''
180 180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181 181
182 182 def tryread(self, path):
183 183 '''gracefully return an empty string for missing files'''
184 184 try:
185 185 return self.read(path)
186 186 except IOError, inst:
187 187 if inst.errno != errno.ENOENT:
188 188 raise
189 189 return ""
190 190
191 191 def tryreadlines(self, path, mode='rb'):
192 192 '''gracefully return an empty array for missing files'''
193 193 try:
194 194 return self.readlines(path, mode=mode)
195 195 except IOError, inst:
196 196 if inst.errno != errno.ENOENT:
197 197 raise
198 198 return []
199 199
200 200 def open(self, path, mode="r", text=False, atomictemp=False,
201 201 notindexed=False):
202 202 '''Open ``path`` file, which is relative to vfs root.
203 203
204 204 Newly created directories are marked as "not to be indexed by
205 205 the content indexing service", if ``notindexed`` is specified
206 206 for "write" mode access.
207 207 '''
208 208 self.open = self.__call__
209 209 return self.__call__(path, mode, text, atomictemp, notindexed)
210 210
211 211 def read(self, path):
212 212 fp = self(path, 'rb')
213 213 try:
214 214 return fp.read()
215 215 finally:
216 216 fp.close()
217 217
218 218 def readlines(self, path, mode='rb'):
219 219 fp = self(path, mode=mode)
220 220 try:
221 221 return fp.readlines()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 232 def writelines(self, path, data, mode='wb', notindexed=False):
233 233 fp = self(path, mode=mode, notindexed=notindexed)
234 234 try:
235 235 return fp.writelines(data)
236 236 finally:
237 237 fp.close()
238 238
239 239 def append(self, path, data):
240 240 fp = self(path, 'ab')
241 241 try:
242 242 return fp.write(data)
243 243 finally:
244 244 fp.close()
245 245
246 246 def chmod(self, path, mode):
247 247 return os.chmod(self.join(path), mode)
248 248
249 249 def exists(self, path=None):
250 250 return os.path.exists(self.join(path))
251 251
252 252 def fstat(self, fp):
253 253 return util.fstat(fp)
254 254
255 255 def isdir(self, path=None):
256 256 return os.path.isdir(self.join(path))
257 257
258 258 def isfile(self, path=None):
259 259 return os.path.isfile(self.join(path))
260 260
261 261 def islink(self, path=None):
262 262 return os.path.islink(self.join(path))
263 263
264 264 def reljoin(self, *paths):
265 265 """join various elements of a path together (as os.path.join would do)
266 266
267 267 The vfs base is not injected so that path stay relative. This exists
268 268 to allow handling of strange encoding if needed."""
269 269 return os.path.join(*paths)
270 270
271 271 def split(self, path):
272 272 """split top-most element of a path (as os.path.split would do)
273 273
274 274 This exists to allow handling of strange encoding if needed."""
275 275 return os.path.split(path)
276 276
277 277 def lexists(self, path=None):
278 278 return os.path.lexists(self.join(path))
279 279
280 280 def lstat(self, path=None):
281 281 return os.lstat(self.join(path))
282 282
283 283 def listdir(self, path=None):
284 284 return os.listdir(self.join(path))
285 285
286 286 def makedir(self, path=None, notindexed=True):
287 287 return util.makedir(self.join(path), notindexed)
288 288
289 289 def makedirs(self, path=None, mode=None):
290 290 return util.makedirs(self.join(path), mode)
291 291
292 292 def makelock(self, info, path):
293 293 return util.makelock(info, self.join(path))
294 294
295 295 def mkdir(self, path=None):
296 296 return os.mkdir(self.join(path))
297 297
298 298 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
299 299 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
300 300 dir=self.join(dir), text=text)
301 301 dname, fname = util.split(name)
302 302 if dir:
303 303 return fd, os.path.join(dir, fname)
304 304 else:
305 305 return fd, fname
306 306
307 307 def readdir(self, path=None, stat=None, skip=None):
308 308 return osutil.listdir(self.join(path), stat, skip)
309 309
310 310 def readlock(self, path):
311 311 return util.readlock(self.join(path))
312 312
313 313 def rename(self, src, dst):
314 314 return util.rename(self.join(src), self.join(dst))
315 315
316 316 def readlink(self, path):
317 317 return os.readlink(self.join(path))
318 318
319 319 def setflags(self, path, l, x):
320 320 return util.setflags(self.join(path), l, x)
321 321
322 322 def stat(self, path=None):
323 323 return os.stat(self.join(path))
324 324
325 325 def unlink(self, path=None):
326 326 return util.unlink(self.join(path))
327 327
328 328 def unlinkpath(self, path=None, ignoremissing=False):
329 329 return util.unlinkpath(self.join(path), ignoremissing)
330 330
331 331 def utime(self, path=None, t=None):
332 332 return os.utime(self.join(path), t)
333 333
334 334 class vfs(abstractvfs):
335 335 '''Operate files relative to a base directory
336 336
337 337 This class is used to hide the details of COW semantics and
338 338 remote file access from higher level code.
339 339 '''
340 340 def __init__(self, base, audit=True, expandpath=False, realpath=False):
341 341 if expandpath:
342 342 base = util.expandpath(base)
343 343 if realpath:
344 344 base = os.path.realpath(base)
345 345 self.base = base
346 346 self._setmustaudit(audit)
347 347 self.createmode = None
348 348 self._trustnlink = None
349 349
350 350 def _getmustaudit(self):
351 351 return self._audit
352 352
353 353 def _setmustaudit(self, onoff):
354 354 self._audit = onoff
355 355 if onoff:
356 356 self.audit = pathutil.pathauditor(self.base)
357 357 else:
358 358 self.audit = util.always
359 359
360 360 mustaudit = property(_getmustaudit, _setmustaudit)
361 361
362 362 @util.propertycache
363 363 def _cansymlink(self):
364 364 return util.checklink(self.base)
365 365
366 366 @util.propertycache
367 367 def _chmod(self):
368 368 return util.checkexec(self.base)
369 369
370 370 def _fixfilemode(self, name):
371 371 if self.createmode is None or not self._chmod:
372 372 return
373 373 os.chmod(name, self.createmode & 0666)
374 374
375 375 def __call__(self, path, mode="r", text=False, atomictemp=False,
376 376 notindexed=False):
377 377 '''Open ``path`` file, which is relative to vfs root.
378 378
379 379 Newly created directories are marked as "not to be indexed by
380 380 the content indexing service", if ``notindexed`` is specified
381 381 for "write" mode access.
382 382 '''
383 383 if self._audit:
384 384 r = util.checkosfilename(path)
385 385 if r:
386 386 raise util.Abort("%s: %r" % (r, path))
387 387 self.audit(path)
388 388 f = self.join(path)
389 389
390 390 if not text and "b" not in mode:
391 391 mode += "b" # for that other OS
392 392
393 393 nlink = -1
394 394 if mode not in ('r', 'rb'):
395 395 dirname, basename = util.split(f)
396 396 # If basename is empty, then the path is malformed because it points
397 397 # to a directory. Let the posixfile() call below raise IOError.
398 398 if basename:
399 399 if atomictemp:
400 400 util.ensuredirs(dirname, self.createmode, notindexed)
401 401 return util.atomictempfile(f, mode, self.createmode)
402 402 try:
403 403 if 'w' in mode:
404 404 util.unlink(f)
405 405 nlink = 0
406 406 else:
407 407 # nlinks() may behave differently for files on Windows
408 408 # shares if the file is open.
409 409 fd = util.posixfile(f)
410 410 nlink = util.nlinks(f)
411 411 if nlink < 1:
412 412 nlink = 2 # force mktempcopy (issue1922)
413 413 fd.close()
414 414 except (OSError, IOError), e:
415 415 if e.errno != errno.ENOENT:
416 416 raise
417 417 nlink = 0
418 418 util.ensuredirs(dirname, self.createmode, notindexed)
419 419 if nlink > 0:
420 420 if self._trustnlink is None:
421 421 self._trustnlink = nlink > 1 or util.checknlink(f)
422 422 if nlink > 1 or not self._trustnlink:
423 423 util.rename(util.mktempcopy(f), f)
424 424 fp = util.posixfile(f, mode)
425 425 if nlink == 0:
426 426 self._fixfilemode(f)
427 427 return fp
428 428
429 429 def symlink(self, src, dst):
430 430 self.audit(dst)
431 431 linkname = self.join(dst)
432 432 try:
433 433 os.unlink(linkname)
434 434 except OSError:
435 435 pass
436 436
437 437 util.ensuredirs(os.path.dirname(linkname), self.createmode)
438 438
439 439 if self._cansymlink:
440 440 try:
441 441 os.symlink(src, linkname)
442 442 except OSError, err:
443 443 raise OSError(err.errno, _('could not symlink to %r: %s') %
444 444 (src, err.strerror), linkname)
445 445 else:
446 446 self.write(dst, src)
447 447
448 448 def join(self, path):
449 449 if path:
450 450 return os.path.join(self.base, path)
451 451 else:
452 452 return self.base
453 453
454 454 opener = vfs
455 455
456 456 class auditvfs(object):
457 457 def __init__(self, vfs):
458 458 self.vfs = vfs
459 459
460 460 def _getmustaudit(self):
461 461 return self.vfs.mustaudit
462 462
463 463 def _setmustaudit(self, onoff):
464 464 self.vfs.mustaudit = onoff
465 465
466 466 mustaudit = property(_getmustaudit, _setmustaudit)
467 467
468 468 class filtervfs(abstractvfs, auditvfs):
469 469 '''Wrapper vfs for filtering filenames with a function.'''
470 470
471 471 def __init__(self, vfs, filter):
472 472 auditvfs.__init__(self, vfs)
473 473 self._filter = filter
474 474
475 475 def __call__(self, path, *args, **kwargs):
476 476 return self.vfs(self._filter(path), *args, **kwargs)
477 477
478 478 def join(self, path):
479 479 if path:
480 480 return self.vfs.join(self._filter(path))
481 481 else:
482 482 return self.vfs.join(path)
483 483
484 484 filteropener = filtervfs
485 485
486 486 class readonlyvfs(abstractvfs, auditvfs):
487 487 '''Wrapper vfs preventing any writing.'''
488 488
489 489 def __init__(self, vfs):
490 490 auditvfs.__init__(self, vfs)
491 491
492 492 def __call__(self, path, mode='r', *args, **kw):
493 493 if mode not in ('r', 'rb'):
494 494 raise util.Abort('this vfs is read only')
495 495 return self.vfs(path, mode, *args, **kw)
496 496
497 497
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only abort when the walk root itself is unreadable
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return True only when it was new
            dirstat = os.stat(dirname)
            for seenstat in dirlst:
                if samestat(dirstat, seenstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # cannot detect symlink loops without samestat
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if not recurse:
                dirs[:] = []  # don't descend further
            else:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink target ourselves,
                        # sharing the seen set to break cycles
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
545 545
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        # pick up every *.rc shipped in the bundled default.d directory
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
558 558
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if not os.path.isdir(p):
            _rcpath.append(p)
        else:
            # a directory entry contributes all of its *.rc files
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
    return _rcpath
584 584
def revsingle(repo, revspec, default='.'):
    '''Resolve revspec to a single changectx.

    An empty revspec (but not the integer 0) falls back to default.
    Raises util.Abort when the spec resolves to an empty set.'''
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec])
    if not resolved:
        raise util.Abort(_('empty revision set'))
    return repo[resolved.last()]
593 593
def revpair(repo, revs):
    '''Resolve revs to a (node, node-or-None) pair.

    With no specs, returns the working directory's first parent and None.
    A single non-range spec resolving to one revision yields (node, None).
    Raises util.Abort when the specs resolve to an empty set.'''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if not resolved:
        first = second = None
    elif resolved.isascending():
        first, second = resolved.min(), resolved.max()
    elif resolved.isdescending():
        first, second = resolved.max(), resolved.min()
    else:
        first, second = resolved.first(), resolved.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
619 619
# separator of old-style "start:end" revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # map an empty spec half (but not 0) to its positional default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # 'l' accumulates results in spec order; 'seen' deduplicates across
    # specs, but its sync with 'l' is deferred for the common fast paths
    seen, l = set(), revset.baseset([])

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        if l and not seen:
            # catch up after a deferred-sync fast path on the previous spec
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                # preserve the direction the user asked for
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo)

    return l
695 695
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # no match on disk: keep the pattern verbatim
            expanded.append(kindpat)
    return expanded
714 714
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.

    The matcher will warn about bad matches. When the matcher ends up
    matching everything, an empty pattern list is returned so callers can
    take always-match fast paths.

    Uses None sentinels instead of mutable default arguments ([] / {}),
    which are a shared-state pitfall; behavior is unchanged for callers.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        # report bad files relative to the cwd, like other match warnings
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        pats = []
    return m, pats
729 731
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.

    Thin wrapper over matchandpats that discards the pattern list.
    None sentinels replace the mutable default arguments; equivalent
    empty containers are forwarded, so behavior is unchanged.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
733 735
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.always(root, cwd)
737 739
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.exact(root, cwd, files)
741 743
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    # Schedule unknown files for addition and missing files for removal,
    # recursing into matched subrepos; detect renames when similarity > 0.
    # Returns 1 when any explicitly-named file was rejected or a subrepo
    # failed, 0 otherwise.
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo participates when it is named exactly or one of the
        # requested files lies under it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            # NOTE(review): plain prefix test — 'sub' would also match
            # 'subfile'; presumably harmless for substate paths, but confirm
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        # warn only for files the user named explicitly, but remember all
        # rejections so we can compute the return code below
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    # temporarily hook the matcher's bad-file callback around the walk
    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
804 806
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # report what is being scheduled, adds before removes per path sort
        addset = set(unknown + forgotten)
        for abs in sorted(addset.union(deleted)):
            if abs in addset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
834 836
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        state = dirstate[abs]
        # classification: the branch order below is significant
        if state == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif state != 'r' and not st:
            deleted.append(abs)
        elif state == 'r' and st:
            forgotten.append(abs)
        elif state == 'r' and not st:
            # removed and gone from disk: candidate for rename detection
            removed.append(abs)
        elif state == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
863 865
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
878 880
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-copy lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        # release even if forget/add/copy raise
        wlock.release()
891 893
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return
    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # the source was only added, never committed: no copy data to keep
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
910 912
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = sorted(r for r in requirements if r not in supported)
    for r in missings:
        # an empty or non-alphanumeric-leading entry means the file is bogus
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
929 931
class filecachesubentry(object):
    '''Tracks stat information for one file backing a filecache entry.'''

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None  # None means we don't know yet

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is None:
            # we don't know yet, assume it is for now
            return True
        return self._cacheable

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # a missing file yields None; any other error propagates
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
984 986
class filecacheentry(object):
    '''Aggregates stat tracking over a group of files.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1001 1003
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator application: remember the wrapped function and cache key
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # obj.__dict__[name] shadows this descriptor once set, so hitting
        # __get__ with the name present means it was explicitly invalidated
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # invalidate the instance-level cache; the _filecache entry remains
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1077 1079
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if util.safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: honor the skip state
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # every ancestor above an already-known dir is known too
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
1113 1115
# prefer the C implementation of dirs when the parsers module provides one
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
1116 1118
def finddirs(path):
    '''Yield each ancestor directory of a '/'-separated path, deepest first.'''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
General Comments 0
You need to be logged in to leave comments. Login now