##// END OF EJS Templates
changelog: use "vfs.fstat()" instead of "util.fstat()"...
FUJIWARA Katsunori -
r19899:8c3dcbbf default
parent child Browse files
Show More
@@ -1,349 +1,350
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid
9 9 from i18n import _
10 10 import util, error, revlog, encoding
11 11
# extras applied to changesets that carry none: every cset is on a branch,
# and the implicit one is 'default'
_defaultextra = {'branch': 'default'}
13 13
14 14 def _string_escape(text):
15 15 """
16 16 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
17 17 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
18 18 >>> s
19 19 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
20 20 >>> res = _string_escape(s)
21 21 >>> s == res.decode('string_escape')
22 22 True
23 23 """
24 24 # subset of the string_escape codec
25 25 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
26 26 return text.replace('\0', '\\0')
27 27
def decodeextra(text):
    """Decode a NUL-separated, string-escaped extra blob into a dict.

    The result always contains a 'branch' key (defaulting to 'default').

    >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
    ... ).iteritems())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({'foo': 'bar',
    ... 'baz': chr(92) + chr(0) + '2'})
    ... ).iteritems())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0:
                # temporarily tag literal backslash pairs with a newline
                # (which cannot occur escaped), substitute, then drop it
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            k, v = l.decode('string_escape').split(':', 1)
            extra[k] = v
    return extra
49 49
def encodeextra(d):
    """Serialize the extra dict *d* into the NUL-separated changelog form.

    Keys are emitted in sorted order so identical dicts always produce the
    same bytes — changelog entries must be deterministic.
    """
    return "\0".join(_string_escape('%s:%s' % (key, d[key]))
                     for key in sorted(d))
54 54
def stripdesc(desc):
    """Strip trailing whitespace from each line of *desc*, and drop
    leading and trailing empty lines."""
    cleaned = [line.rstrip() for line in desc.splitlines()]
    return '\n'.join(cleaned).strip('\n')
58 58
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, vfs, name, mode, buf):
        # buf is a shared list collecting delayed writes; it is owned by
        # the caller (see delayopener) so the data outlives this object
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        # offset is the current *virtual* position; size is the on-disk
        # length — anything past it lives only in self.data
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size

    def end(self):
        # virtual end of file: real size plus all buffered data
        return self.size + len("".join(self.data))
    def tell(self):
        return self.offset
    def flush(self):
        # no-op: writes are deliberately kept in memory until finalized
        pass
    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            # only positions inside the real file map onto the file object
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            # start in the on-disk portion
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # continue in the in-memory buffer; collapse it to a single
            # string first so a plain slice covers the remainder
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        # appends always go to the in-memory buffer, never to disk
        self.data.append(str(s))
        self.offset += len(s)
109 110
def delayopener(opener, target, divert, buf):
    """Return an opener wrapper that delays writes to *target*.

    Opens of any other file pass straight through to *opener*.  When
    *divert* is true, *target* is redirected to a ``.a`` sibling file;
    otherwise writes are collected in memory via an appender over *buf*.
    """
    def wrapped(name, mode='r'):
        if name != target:
            return opener(name, mode)
        if divert:
            # divert to a temporary file next to the real one
            return opener(name + ".a", mode.replace('a', 'w'))
        # otherwise, buffer writes in memory until finalized
        return appender(opener, name, mode, buf)
    return wrapped
119 120
class changelog(revlog.revlog):
    def __init__(self, opener):
        revlog.revlog.__init__(self, opener, "00changelog.i")
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.REVLOGGENERALDELTA
            self._generaldelta = False
        # _realopener is stashed so delayupdate() can temporarily swap in
        # a buffering opener and finalize() can restore the real one
        self._realopener = opener
        self._delayed = False
        self._divert = False
        # revisions in filteredrevs are hidden by every accessor below
        self.filteredrevs = frozenset()

    def tip(self):
        """filtered version of revlog.tip"""
        # walk backwards until a non-filtered rev is found (down to -1)
        for i in xrange(len(self) -1, -2, -1):
            if i not in self.filteredrevs:
                return self.node(i)

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    @util.propertycache
    def nodemap(self):
        # XXX need filtering too
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        """filtered version of revlog.hasnode"""
        try:
            i = self.rev(node)
            return i not in self.filteredrevs
        except KeyError:
            return False

    def headrevs(self):
        if self.filteredrevs:
            # XXX we should fix and use the C version
            return self._headrevs()
        return super(changelog, self).headrevs()

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.LookupError(hex(node), self.indexfile, _('no node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise IndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise IndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise IndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise IndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self):
        "delay visibility of index updates to other readers"
        self._delayed = True
        # an empty changelog can divert straight to a temp file; an
        # existing one buffers appended entries in memory instead
        self._divert = (len(self) == 0)
        self._delaybuf = []
        self.opener = delayopener(self._realopener, self.indexfile,
                                  self._divert, self._delaybuf)

    def finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a')
            fp.write("".join(self._delaybuf))
            fp.close()
            self._delaybuf = []
        # split when we're done
        self.checkinlinesize(tr)

    def readpending(self, file):
        # adopt the index state of the pending file wholesale
        r = revlog.revlog(self.opener, file)
        self.index = r.index
        self.nodemap = r.nodemap
        self._nodecache = r._nodecache
        self._chunkcache = r._chunkcache

    def writepending(self):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            fp2 = self._realopener(self.indexfile + ".a", "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = []
            self._divert = True

        if self._divert:
            return True

        return False

    def checkinlinesize(self, tr, fp=None):
        # defer the inline-to-split decision while updates are delayed
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)

    def read(self, node):
        """
        format used:
        nodeid\n        : manifest node in ascii
        user\n          : user, no \n or \r allowed
        time tz extra\n : date (time is int or float, timezone is int)
                        : extra is metadata, encoded and separated by '\0'
                        : older versions ignore it
        files\n\n       : files modified by the cset, no \n or \r allowed
        (.*)            : comment (free text, ideally utf-8)

        changelog v0 doesn't use extra
        """
        text = self.revision(node)
        if not text:
            return (nullid, "", (0, 0), [], "", _defaultextra)
        last = text.index("\n\n")
        desc = encoding.tolocal(text[last + 2:])
        l = text[:last].split('\n')
        manifest = bin(l[0])
        user = encoding.tolocal(l[1])

        tdata = l[2].split(' ', 2)
        if len(tdata) != 3:
            # old (v0) entry without the extra field
            time = float(tdata[0])
            try:
                # various tools did silly things with the time zone field.
                timezone = int(tdata[1])
            except ValueError:
                timezone = 0
            extra = _defaultextra
        else:
            time, timezone = float(tdata[0]), int(tdata[1])
            extra = decodeextra(tdata[2])

        files = l[3:]
        return (manifest, user, (time, timezone), files, desc, extra)

    def add(self, manifest, files, desc, transaction, p1, p2,
                  user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %s contains a newline")
                                    % repr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % util.parsedate(date)
        else:
            parseddate = "%d %d" % util.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                # the default branch is stored implicitly (see decodeextra)
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branch(self, rev):
        """return the branch of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        return encoding.tolocal(self.read(rev)[5].get("branch"))
@@ -1,1018 +1,1021
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import match as matchmod
12 12 import os, errno, re, stat, glob
13 13
14 14 if os.name == 'nt':
15 15 import scmwindows as scmplatform
16 16 else:
17 17 import scmposix as scmplatform
18 18
19 19 systemrcpath = scmplatform.systemrcpath
20 20 userrcpath = scmplatform.userrcpath
21 21
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for node in (excluded or []):
        if node not in repo:
            # discovery should not have included the filtered revision;
            # we have to explicitly exclude it until discovery is cleaned up.
            continue
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(node)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
42 42
def checknewlabel(repo, lbl, kind):
    """Abort if *lbl* is not usable as a new label (tag/bookmark/branch).

    Note: "kind" is deliberately not interpolated into the ui output —
    doing so would make the strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    # these characters would corrupt label storage / revset syntax
    for ch in (':', '\0', '\n', '\r'):
        if ch in lbl:
            raise util.Abort(_("%r cannot be used in a name") % ch)
    try:
        int(lbl)
    except ValueError:
        # not an integer: acceptable
        return
    raise util.Abort(_("cannot use an integer as a name"))
56 56
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newlines would corrupt the line-oriented dirstate/manifest formats
    for banned in ('\r', '\n'):
        if banned in f:
            raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                             % f)
61 61
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    # hard requirement first: no \n or \r regardless of config
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise util.Abort(msg)
            # warn-only mode: report the problem but keep going
            ui.warn(_("warning: %s\n") % msg)
73 73
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans.
    '''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows cannot create non-portable names at all, so always abort there
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
86 86
class casecollisionauditor(object):
    """Warn or abort when an added file collides with a tracked file by
    case only (a problem on case-insensitive filesystems)."""
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case every tracked name once, up front, so each later
        # check is a single set lookup
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        fl = encoding.lower(f)
        if (fl in self._loweredfiles and f not in self._dirstate and
            f not in self._newfiles):
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
109 109
class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of already-validated paths/dir prefixes (normalized form)
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback
        if os.path.lexists(root) and not util.checkcase(root):
            # case-insensitive filesystem: normalize case before caching
            self.normcase = util.normcase
        else:
            self.normcase = lambda x: x

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        path = util.localpath(path)
        normpath = self.normcase(path)
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        # walk up the directory chain, checking each prefix for symlinks
        # and nested repositories
        parts.pop()
        normparts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested "
                                           "repo %r")
                                         % (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

    def check(self, path):
        # boolean variant of __call__: True if the path passes the audit
        try:
            self(path)
            return True
        except (OSError, util.Abort):
            return False
203 203
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def open(self, path, mode="r", text=False, atomictemp=False):
        # the first call rebinds self.open to __call__ so subsequent
        # opens skip this indirection
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp)

    def read(self, path):
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        '''stat the already-open file object *fp* belonging to this vfs'''
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)
283 286
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        # None until first write: whether st_nlink can be trusted here
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathauditor(self.base)
        else:
            # auditing disabled: accept every path
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to freshly created files, when supported
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        '''open *path* (relative to base), auditing it first if enabled'''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break hardlinks before writing (COW semantics)
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the target as the file's contents
            self.write(dst, src)

    def join(self, path):
        '''return *path* joined onto the base directory'''
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
396 399
# historical alias kept for callers that predate the vfs naming
opener = vfs
398 401
class auditvfs(object):
    """Wrapper base class that proxies the ``mustaudit`` flag of an
    inner vfs."""

    def __init__(self, vfs):
        # keep a reference to the vfs we delegate to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, value):
        self.vfs.mustaudit = value

    mustaudit = property(_getmustaudit, _setmustaudit)
410 413
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # filter: callable mapping a requested path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if path:
            return self.vfs.join(self._filter(path))
        else:
            return self.vfs.join(path)
426 429
# historical alias kept for callers that predate the vfs naming
filteropener = filtervfs
428 431
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through to the wrapped vfs
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
439 442
440 443
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root

    Raises util.Abort if myname lies outside the repository root.
    '''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name is textually under the root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). The list
        # `rel' holds the reversed list of components making up the relative
        # file name we want.
        rel = []
        while True:
            try:
                s = util.samefile(name, root)
            except OSError:
                s = False
            if s:
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = util.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise util.Abort(_("%s not under root '%s'") % (myname, root))
486 489
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # track stat results of visited dirs so symlink cycles terminate
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so don't follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link with a fresh walk
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
534 537
def osrcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries first, then the per-user ones
    rcs = systemrcpath() + userrcpath()
    return [os.path.normpath(p) for p in rcs]
541 544
# memoized result of rcpath(); None until first computed
_rcpath = None
543 546
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # computed lazily and memoized in the module-level _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
567 570
def revsingle(repo, revspec, default='.'):
    '''return the single changectx for revspec, or for *default* when
    revspec is empty (rev 0 counts as a real revision)'''
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if len(l) < 1:
        raise util.Abort(_('empty revision set'))
    return repo[l[-1]]
576 579
def revpair(repo, revs):
    '''return a (node1, node2) pair for the revision specs in *revs*;
    node2 is None when a single non-range revision was given'''
    if not revs:
        # no revs given: compare against the working directory parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if len(l) == 0:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        # a single rev that was not written as a range
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])
592 595
# separator used in old-style "start:end" revision ranges
_revrangesep = ':'
594 597
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # empty spec (but not rev 0) falls back to the provided default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # `seen` deduplicates across specs; it is synced lazily from `l`
    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = list(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, list(repo)) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l
652 655
def expandpats(pats):
    '''Expand bare (kind-less) patterns with the shell glob rules.

    Patterns carrying an explicit kind prefix, and globs that match
    nothing, are passed through unchanged.
    '''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(pat)
    return expanded
669 672
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Build a matcher for ctx; return it along with the expanded patterns.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'), default)

    def warnbad(f, msg):
        # report unmatchable files on the ui instead of raising
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))

    m.bad = warnbad
    return m, pats
682 685
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Build a matcher for ctx (see matchandpats), discarding the patterns.'''
    matcher, _unused = matchandpats(ctx, pats, opts, globbed, default)
    return matcher
685 688
def matchall(repo):
    '''return a matchmod.always matcher rooted at the repo root and cwd'''
    return matchmod.always(repo.root, repo.getcwd())
688 691
def matchfiles(repo, files):
    '''return a matchmod.exact matcher over the given file list'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
691 694
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, detecting renames.

    Returns 1 when any explicitly named pattern failed to match,
    0 otherwise.
    '''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    unknownset = set(unknown)
    for abs in sorted(unknownset.union(deleted)):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
            else:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
726 729
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown)
        for abs in sorted(unknownset.union(deleted)):
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown, deleted, renames)

    # report failure when an explicitly requested file was rejected
    for f in rejected:
        if f in m.files():
            return 1
    return 0
756 759
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # NOTE: branch order matters -- a tracked-but-missing file (falsy st)
    # is reported as deleted before its 'r'/'a' state is considered
    for path, st in walkresults.iteritems():
        state = dirstate[path]
        if state == '?' and audit_path.check(path):
            unknown.append(path)
        elif state != 'r' and not st:
            deleted.append(path)
        # for finding renames
        elif state == 'r':
            removed.append(path)
        elif state == 'a':
            added.append(path)

    return added, unknown, deleted, removed
783 786
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
798 801
def _markchanges(repo, unknown, deleted, renames):
    '''Record the outcome of addremove in the working context: forget
    deleted files, add unknown ones, and mark renames as copies.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
    finally:
        wlock.release()
811 814
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return
    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # source was only just added: warn and add dst instead of
        # recording copy data
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
830 833
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    unsupported = []
    for name in requirements:
        if name in supported:
            continue
        # a blank or non-alphanumeric-leading entry means the file itself
        # is damaged rather than merely listing an unknown feature
        if not name or not name[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unsupported.append(name)
    unsupported.sort()
    if unsupported:
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(unsupported))
    return requirements
847 850
class filecacheentry(object):
    """Stat-based validity token for a single cached file.

    Wraps util.cachestat for ``path`` and tracks whether that stat
    result can be trusted for cache validation (``cacheable``); when it
    cannot, ``changed()`` always reports True so callers recompute.
    """
    def __init__(self, path, stat=True):
        # path: file to watch
        self.path = path
        # cachestat: last util.cachestat result (None if unknown/missing)
        self.cachestat = None
        # _cacheable: tri-state -- True/False once known, None otherwise
        self._cacheable = None

        if stat:
            self.cachestat = filecacheentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so subsequent changed() calls compare against the
        # current on-disk state
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed (or can't be trusted) since
        the last recorded stat; records the new stat on change."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist; any other stat
        # failure is propagated to the caller
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
902 905
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path):
        # relative file name; join() resolves it to a runtime path
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        """Descriptor read: return the cached value, recomputing it when
        the backing file's stat info says it changed."""
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # mirror into __dict__ so later reads skip the descriptor
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # assignment replaces the cached value without re-reading the file
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            ce = filecacheentry(self.join(obj, self.path), False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop the mirrored value; the next read goes through __get__
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
974 977
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # count every ancestor directory of every path in 'map'; when
        # 'skip' is given and 'map' is dict-like, entries whose first
        # state byte equals 'skip' are left out
        self._dirs = {}
        if util.safehasattr(map, 'iteritems') and skip is not None:
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    self.addpath(fname)
        else:
            for fname in map:
                self.addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # an already-known ancestor was counted together with
                # its own ancestors, so stop here
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # shallower ancestors keep their remaining references
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
1010 1013
# prefer parsers.dirs when the parsers module provides one -- presumably
# a faster native implementation; TODO confirm against parsers module
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
1013 1016
def finddirs(path):
    """Generate every ancestor directory of *path*, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path without '/' yields nothing.
    """
    remainder = path
    while True:
        head, sep, _tail = remainder.rpartition('/')
        if not sep:
            return
        yield head
        remainder = head
General Comments 0
You need to be logged in to leave comments. Login now