##// END OF EJS Templates
changelog: use "vfs.fstat()" instead of "util.fstat()"...
FUJIWARA Katsunori -
r19899:8c3dcbbf default
parent child Browse files
Show More
@@ -1,349 +1,350
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid
8 from node import bin, hex, nullid
9 from i18n import _
9 from i18n import _
10 import util, error, revlog, encoding
10 import util, error, revlog, encoding
11
11
12 _defaultextra = {'branch': 'default'}
12 _defaultextra = {'branch': 'default'}
13
13
14 def _string_escape(text):
14 def _string_escape(text):
15 """
15 """
16 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
16 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
17 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
17 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
18 >>> s
18 >>> s
19 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
19 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
20 >>> res = _string_escape(s)
20 >>> res = _string_escape(s)
21 >>> s == res.decode('string_escape')
21 >>> s == res.decode('string_escape')
22 True
22 True
23 """
23 """
24 # subset of the string_escape codec
24 # subset of the string_escape codec
25 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
25 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
26 return text.replace('\0', '\\0')
26 return text.replace('\0', '\\0')
27
27
28 def decodeextra(text):
28 def decodeextra(text):
29 """
29 """
30 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
30 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
31 ... ).iteritems())
31 ... ).iteritems())
32 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
32 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
33 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
33 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
34 ... 'baz': chr(92) + chr(0) + '2'})
34 ... 'baz': chr(92) + chr(0) + '2'})
35 ... ).iteritems())
35 ... ).iteritems())
36 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
36 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
37 """
37 """
38 extra = _defaultextra.copy()
38 extra = _defaultextra.copy()
39 for l in text.split('\0'):
39 for l in text.split('\0'):
40 if l:
40 if l:
41 if '\\0' in l:
41 if '\\0' in l:
42 # fix up \0 without getting into trouble with \\0
42 # fix up \0 without getting into trouble with \\0
43 l = l.replace('\\\\', '\\\\\n')
43 l = l.replace('\\\\', '\\\\\n')
44 l = l.replace('\\0', '\0')
44 l = l.replace('\\0', '\0')
45 l = l.replace('\n', '')
45 l = l.replace('\n', '')
46 k, v = l.decode('string_escape').split(':', 1)
46 k, v = l.decode('string_escape').split(':', 1)
47 extra[k] = v
47 extra[k] = v
48 return extra
48 return extra
49
49
50 def encodeextra(d):
50 def encodeextra(d):
51 # keys must be sorted to produce a deterministic changelog entry
51 # keys must be sorted to produce a deterministic changelog entry
52 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
52 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
53 return "\0".join(items)
53 return "\0".join(items)
54
54
55 def stripdesc(desc):
55 def stripdesc(desc):
56 """strip trailing whitespace and leading and trailing empty lines"""
56 """strip trailing whitespace and leading and trailing empty lines"""
57 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
57 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
58
58
59 class appender(object):
59 class appender(object):
60 '''the changelog index must be updated last on disk, so we use this class
60 '''the changelog index must be updated last on disk, so we use this class
61 to delay writes to it'''
61 to delay writes to it'''
62 def __init__(self, fp, buf):
62 def __init__(self, vfs, name, mode, buf):
63 self.data = buf
63 self.data = buf
64 fp = vfs(name, mode)
64 self.fp = fp
65 self.fp = fp
65 self.offset = fp.tell()
66 self.offset = fp.tell()
66 self.size = util.fstat(fp).st_size
67 self.size = vfs.fstat(fp).st_size
67
68
68 def end(self):
69 def end(self):
69 return self.size + len("".join(self.data))
70 return self.size + len("".join(self.data))
70 def tell(self):
71 def tell(self):
71 return self.offset
72 return self.offset
72 def flush(self):
73 def flush(self):
73 pass
74 pass
74 def close(self):
75 def close(self):
75 self.fp.close()
76 self.fp.close()
76
77
77 def seek(self, offset, whence=0):
78 def seek(self, offset, whence=0):
78 '''virtual file offset spans real file and data'''
79 '''virtual file offset spans real file and data'''
79 if whence == 0:
80 if whence == 0:
80 self.offset = offset
81 self.offset = offset
81 elif whence == 1:
82 elif whence == 1:
82 self.offset += offset
83 self.offset += offset
83 elif whence == 2:
84 elif whence == 2:
84 self.offset = self.end() + offset
85 self.offset = self.end() + offset
85 if self.offset < self.size:
86 if self.offset < self.size:
86 self.fp.seek(self.offset)
87 self.fp.seek(self.offset)
87
88
88 def read(self, count=-1):
89 def read(self, count=-1):
89 '''only trick here is reads that span real file and data'''
90 '''only trick here is reads that span real file and data'''
90 ret = ""
91 ret = ""
91 if self.offset < self.size:
92 if self.offset < self.size:
92 s = self.fp.read(count)
93 s = self.fp.read(count)
93 ret = s
94 ret = s
94 self.offset += len(s)
95 self.offset += len(s)
95 if count > 0:
96 if count > 0:
96 count -= len(s)
97 count -= len(s)
97 if count != 0:
98 if count != 0:
98 doff = self.offset - self.size
99 doff = self.offset - self.size
99 self.data.insert(0, "".join(self.data))
100 self.data.insert(0, "".join(self.data))
100 del self.data[1:]
101 del self.data[1:]
101 s = self.data[0][doff:doff + count]
102 s = self.data[0][doff:doff + count]
102 self.offset += len(s)
103 self.offset += len(s)
103 ret += s
104 ret += s
104 return ret
105 return ret
105
106
106 def write(self, s):
107 def write(self, s):
107 self.data.append(str(s))
108 self.data.append(str(s))
108 self.offset += len(s)
109 self.offset += len(s)
109
110
110 def delayopener(opener, target, divert, buf):
111 def delayopener(opener, target, divert, buf):
111 def o(name, mode='r'):
112 def o(name, mode='r'):
112 if name != target:
113 if name != target:
113 return opener(name, mode)
114 return opener(name, mode)
114 if divert:
115 if divert:
115 return opener(name + ".a", mode.replace('a', 'w'))
116 return opener(name + ".a", mode.replace('a', 'w'))
116 # otherwise, divert to memory
117 # otherwise, divert to memory
117 return appender(opener(name, mode), buf)
118 return appender(opener, name, mode, buf)
118 return o
119 return o
119
120
120 class changelog(revlog.revlog):
121 class changelog(revlog.revlog):
121 def __init__(self, opener):
122 def __init__(self, opener):
122 revlog.revlog.__init__(self, opener, "00changelog.i")
123 revlog.revlog.__init__(self, opener, "00changelog.i")
123 if self._initempty:
124 if self._initempty:
124 # changelogs don't benefit from generaldelta
125 # changelogs don't benefit from generaldelta
125 self.version &= ~revlog.REVLOGGENERALDELTA
126 self.version &= ~revlog.REVLOGGENERALDELTA
126 self._generaldelta = False
127 self._generaldelta = False
127 self._realopener = opener
128 self._realopener = opener
128 self._delayed = False
129 self._delayed = False
129 self._divert = False
130 self._divert = False
130 self.filteredrevs = frozenset()
131 self.filteredrevs = frozenset()
131
132
132 def tip(self):
133 def tip(self):
133 """filtered version of revlog.tip"""
134 """filtered version of revlog.tip"""
134 for i in xrange(len(self) -1, -2, -1):
135 for i in xrange(len(self) -1, -2, -1):
135 if i not in self.filteredrevs:
136 if i not in self.filteredrevs:
136 return self.node(i)
137 return self.node(i)
137
138
138 def __iter__(self):
139 def __iter__(self):
139 """filtered version of revlog.__iter__"""
140 """filtered version of revlog.__iter__"""
140 if len(self.filteredrevs) == 0:
141 if len(self.filteredrevs) == 0:
141 return revlog.revlog.__iter__(self)
142 return revlog.revlog.__iter__(self)
142
143
143 def filterediter():
144 def filterediter():
144 for i in xrange(len(self)):
145 for i in xrange(len(self)):
145 if i not in self.filteredrevs:
146 if i not in self.filteredrevs:
146 yield i
147 yield i
147
148
148 return filterediter()
149 return filterediter()
149
150
150 def revs(self, start=0, stop=None):
151 def revs(self, start=0, stop=None):
151 """filtered version of revlog.revs"""
152 """filtered version of revlog.revs"""
152 for i in super(changelog, self).revs(start, stop):
153 for i in super(changelog, self).revs(start, stop):
153 if i not in self.filteredrevs:
154 if i not in self.filteredrevs:
154 yield i
155 yield i
155
156
156 @util.propertycache
157 @util.propertycache
157 def nodemap(self):
158 def nodemap(self):
158 # XXX need filtering too
159 # XXX need filtering too
159 self.rev(self.node(0))
160 self.rev(self.node(0))
160 return self._nodecache
161 return self._nodecache
161
162
162 def hasnode(self, node):
163 def hasnode(self, node):
163 """filtered version of revlog.hasnode"""
164 """filtered version of revlog.hasnode"""
164 try:
165 try:
165 i = self.rev(node)
166 i = self.rev(node)
166 return i not in self.filteredrevs
167 return i not in self.filteredrevs
167 except KeyError:
168 except KeyError:
168 return False
169 return False
169
170
170 def headrevs(self):
171 def headrevs(self):
171 if self.filteredrevs:
172 if self.filteredrevs:
172 # XXX we should fix and use the C version
173 # XXX we should fix and use the C version
173 return self._headrevs()
174 return self._headrevs()
174 return super(changelog, self).headrevs()
175 return super(changelog, self).headrevs()
175
176
176 def strip(self, *args, **kwargs):
177 def strip(self, *args, **kwargs):
177 # XXX make something better than assert
178 # XXX make something better than assert
178 # We can't expect proper strip behavior if we are filtered.
179 # We can't expect proper strip behavior if we are filtered.
179 assert not self.filteredrevs
180 assert not self.filteredrevs
180 super(changelog, self).strip(*args, **kwargs)
181 super(changelog, self).strip(*args, **kwargs)
181
182
182 def rev(self, node):
183 def rev(self, node):
183 """filtered version of revlog.rev"""
184 """filtered version of revlog.rev"""
184 r = super(changelog, self).rev(node)
185 r = super(changelog, self).rev(node)
185 if r in self.filteredrevs:
186 if r in self.filteredrevs:
186 raise error.LookupError(hex(node), self.indexfile, _('no node'))
187 raise error.LookupError(hex(node), self.indexfile, _('no node'))
187 return r
188 return r
188
189
189 def node(self, rev):
190 def node(self, rev):
190 """filtered version of revlog.node"""
191 """filtered version of revlog.node"""
191 if rev in self.filteredrevs:
192 if rev in self.filteredrevs:
192 raise IndexError(rev)
193 raise IndexError(rev)
193 return super(changelog, self).node(rev)
194 return super(changelog, self).node(rev)
194
195
195 def linkrev(self, rev):
196 def linkrev(self, rev):
196 """filtered version of revlog.linkrev"""
197 """filtered version of revlog.linkrev"""
197 if rev in self.filteredrevs:
198 if rev in self.filteredrevs:
198 raise IndexError(rev)
199 raise IndexError(rev)
199 return super(changelog, self).linkrev(rev)
200 return super(changelog, self).linkrev(rev)
200
201
201 def parentrevs(self, rev):
202 def parentrevs(self, rev):
202 """filtered version of revlog.parentrevs"""
203 """filtered version of revlog.parentrevs"""
203 if rev in self.filteredrevs:
204 if rev in self.filteredrevs:
204 raise IndexError(rev)
205 raise IndexError(rev)
205 return super(changelog, self).parentrevs(rev)
206 return super(changelog, self).parentrevs(rev)
206
207
207 def flags(self, rev):
208 def flags(self, rev):
208 """filtered version of revlog.flags"""
209 """filtered version of revlog.flags"""
209 if rev in self.filteredrevs:
210 if rev in self.filteredrevs:
210 raise IndexError(rev)
211 raise IndexError(rev)
211 return super(changelog, self).flags(rev)
212 return super(changelog, self).flags(rev)
212
213
213 def delayupdate(self):
214 def delayupdate(self):
214 "delay visibility of index updates to other readers"
215 "delay visibility of index updates to other readers"
215 self._delayed = True
216 self._delayed = True
216 self._divert = (len(self) == 0)
217 self._divert = (len(self) == 0)
217 self._delaybuf = []
218 self._delaybuf = []
218 self.opener = delayopener(self._realopener, self.indexfile,
219 self.opener = delayopener(self._realopener, self.indexfile,
219 self._divert, self._delaybuf)
220 self._divert, self._delaybuf)
220
221
221 def finalize(self, tr):
222 def finalize(self, tr):
222 "finalize index updates"
223 "finalize index updates"
223 self._delayed = False
224 self._delayed = False
224 self.opener = self._realopener
225 self.opener = self._realopener
225 # move redirected index data back into place
226 # move redirected index data back into place
226 if self._divert:
227 if self._divert:
227 tmpname = self.indexfile + ".a"
228 tmpname = self.indexfile + ".a"
228 nfile = self.opener.open(tmpname)
229 nfile = self.opener.open(tmpname)
229 nfile.close()
230 nfile.close()
230 self.opener.rename(tmpname, self.indexfile)
231 self.opener.rename(tmpname, self.indexfile)
231 elif self._delaybuf:
232 elif self._delaybuf:
232 fp = self.opener(self.indexfile, 'a')
233 fp = self.opener(self.indexfile, 'a')
233 fp.write("".join(self._delaybuf))
234 fp.write("".join(self._delaybuf))
234 fp.close()
235 fp.close()
235 self._delaybuf = []
236 self._delaybuf = []
236 # split when we're done
237 # split when we're done
237 self.checkinlinesize(tr)
238 self.checkinlinesize(tr)
238
239
239 def readpending(self, file):
240 def readpending(self, file):
240 r = revlog.revlog(self.opener, file)
241 r = revlog.revlog(self.opener, file)
241 self.index = r.index
242 self.index = r.index
242 self.nodemap = r.nodemap
243 self.nodemap = r.nodemap
243 self._nodecache = r._nodecache
244 self._nodecache = r._nodecache
244 self._chunkcache = r._chunkcache
245 self._chunkcache = r._chunkcache
245
246
246 def writepending(self):
247 def writepending(self):
247 "create a file containing the unfinalized state for pretxnchangegroup"
248 "create a file containing the unfinalized state for pretxnchangegroup"
248 if self._delaybuf:
249 if self._delaybuf:
249 # make a temporary copy of the index
250 # make a temporary copy of the index
250 fp1 = self._realopener(self.indexfile)
251 fp1 = self._realopener(self.indexfile)
251 fp2 = self._realopener(self.indexfile + ".a", "w")
252 fp2 = self._realopener(self.indexfile + ".a", "w")
252 fp2.write(fp1.read())
253 fp2.write(fp1.read())
253 # add pending data
254 # add pending data
254 fp2.write("".join(self._delaybuf))
255 fp2.write("".join(self._delaybuf))
255 fp2.close()
256 fp2.close()
256 # switch modes so finalize can simply rename
257 # switch modes so finalize can simply rename
257 self._delaybuf = []
258 self._delaybuf = []
258 self._divert = True
259 self._divert = True
259
260
260 if self._divert:
261 if self._divert:
261 return True
262 return True
262
263
263 return False
264 return False
264
265
265 def checkinlinesize(self, tr, fp=None):
266 def checkinlinesize(self, tr, fp=None):
266 if not self._delayed:
267 if not self._delayed:
267 revlog.revlog.checkinlinesize(self, tr, fp)
268 revlog.revlog.checkinlinesize(self, tr, fp)
268
269
269 def read(self, node):
270 def read(self, node):
270 """
271 """
271 format used:
272 format used:
272 nodeid\n : manifest node in ascii
273 nodeid\n : manifest node in ascii
273 user\n : user, no \n or \r allowed
274 user\n : user, no \n or \r allowed
274 time tz extra\n : date (time is int or float, timezone is int)
275 time tz extra\n : date (time is int or float, timezone is int)
275 : extra is metadata, encoded and separated by '\0'
276 : extra is metadata, encoded and separated by '\0'
276 : older versions ignore it
277 : older versions ignore it
277 files\n\n : files modified by the cset, no \n or \r allowed
278 files\n\n : files modified by the cset, no \n or \r allowed
278 (.*) : comment (free text, ideally utf-8)
279 (.*) : comment (free text, ideally utf-8)
279
280
280 changelog v0 doesn't use extra
281 changelog v0 doesn't use extra
281 """
282 """
282 text = self.revision(node)
283 text = self.revision(node)
283 if not text:
284 if not text:
284 return (nullid, "", (0, 0), [], "", _defaultextra)
285 return (nullid, "", (0, 0), [], "", _defaultextra)
285 last = text.index("\n\n")
286 last = text.index("\n\n")
286 desc = encoding.tolocal(text[last + 2:])
287 desc = encoding.tolocal(text[last + 2:])
287 l = text[:last].split('\n')
288 l = text[:last].split('\n')
288 manifest = bin(l[0])
289 manifest = bin(l[0])
289 user = encoding.tolocal(l[1])
290 user = encoding.tolocal(l[1])
290
291
291 tdata = l[2].split(' ', 2)
292 tdata = l[2].split(' ', 2)
292 if len(tdata) != 3:
293 if len(tdata) != 3:
293 time = float(tdata[0])
294 time = float(tdata[0])
294 try:
295 try:
295 # various tools did silly things with the time zone field.
296 # various tools did silly things with the time zone field.
296 timezone = int(tdata[1])
297 timezone = int(tdata[1])
297 except ValueError:
298 except ValueError:
298 timezone = 0
299 timezone = 0
299 extra = _defaultextra
300 extra = _defaultextra
300 else:
301 else:
301 time, timezone = float(tdata[0]), int(tdata[1])
302 time, timezone = float(tdata[0]), int(tdata[1])
302 extra = decodeextra(tdata[2])
303 extra = decodeextra(tdata[2])
303
304
304 files = l[3:]
305 files = l[3:]
305 return (manifest, user, (time, timezone), files, desc, extra)
306 return (manifest, user, (time, timezone), files, desc, extra)
306
307
307 def add(self, manifest, files, desc, transaction, p1, p2,
308 def add(self, manifest, files, desc, transaction, p1, p2,
308 user, date=None, extra=None):
309 user, date=None, extra=None):
309 # Convert to UTF-8 encoded bytestrings as the very first
310 # Convert to UTF-8 encoded bytestrings as the very first
310 # thing: calling any method on a localstr object will turn it
311 # thing: calling any method on a localstr object will turn it
311 # into a str object and the cached UTF-8 string is thus lost.
312 # into a str object and the cached UTF-8 string is thus lost.
312 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
313 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
313
314
314 user = user.strip()
315 user = user.strip()
315 # An empty username or a username with a "\n" will make the
316 # An empty username or a username with a "\n" will make the
316 # revision text contain two "\n\n" sequences -> corrupt
317 # revision text contain two "\n\n" sequences -> corrupt
317 # repository since read cannot unpack the revision.
318 # repository since read cannot unpack the revision.
318 if not user:
319 if not user:
319 raise error.RevlogError(_("empty username"))
320 raise error.RevlogError(_("empty username"))
320 if "\n" in user:
321 if "\n" in user:
321 raise error.RevlogError(_("username %s contains a newline")
322 raise error.RevlogError(_("username %s contains a newline")
322 % repr(user))
323 % repr(user))
323
324
324 desc = stripdesc(desc)
325 desc = stripdesc(desc)
325
326
326 if date:
327 if date:
327 parseddate = "%d %d" % util.parsedate(date)
328 parseddate = "%d %d" % util.parsedate(date)
328 else:
329 else:
329 parseddate = "%d %d" % util.makedate()
330 parseddate = "%d %d" % util.makedate()
330 if extra:
331 if extra:
331 branch = extra.get("branch")
332 branch = extra.get("branch")
332 if branch in ("default", ""):
333 if branch in ("default", ""):
333 del extra["branch"]
334 del extra["branch"]
334 elif branch in (".", "null", "tip"):
335 elif branch in (".", "null", "tip"):
335 raise error.RevlogError(_('the name \'%s\' is reserved')
336 raise error.RevlogError(_('the name \'%s\' is reserved')
336 % branch)
337 % branch)
337 if extra:
338 if extra:
338 extra = encodeextra(extra)
339 extra = encodeextra(extra)
339 parseddate = "%s %s" % (parseddate, extra)
340 parseddate = "%s %s" % (parseddate, extra)
340 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
341 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
341 text = "\n".join(l)
342 text = "\n".join(l)
342 return self.addrevision(text, transaction, len(self), p1, p2)
343 return self.addrevision(text, transaction, len(self), p1, p2)
343
344
344 def branch(self, rev):
345 def branch(self, rev):
345 """return the branch of a revision
346 """return the branch of a revision
346
347
347 This function exists because creating a changectx object
348 This function exists because creating a changectx object
348 just to access this is costly."""
349 just to access this is costly."""
349 return encoding.tolocal(self.read(rev)[5].get("branch"))
350 return encoding.tolocal(self.read(rev)[5].get("branch"))
@@ -1,1018 +1,1021
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases, parsers
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import match as matchmod
11 import match as matchmod
12 import os, errno, re, stat, glob
12 import os, errno, re, stat, glob
13
13
14 if os.name == 'nt':
14 if os.name == 'nt':
15 import scmwindows as scmplatform
15 import scmwindows as scmplatform
16 else:
16 else:
17 import scmposix as scmplatform
17 import scmposix as scmplatform
18
18
19 systemrcpath = scmplatform.systemrcpath
19 systemrcpath = scmplatform.systemrcpath
20 userrcpath = scmplatform.userrcpath
20 userrcpath = scmplatform.userrcpath
21
21
22 def nochangesfound(ui, repo, excluded=None):
22 def nochangesfound(ui, repo, excluded=None):
23 '''Report no changes for push/pull, excluded is None or a list of
23 '''Report no changes for push/pull, excluded is None or a list of
24 nodes excluded from the push/pull.
24 nodes excluded from the push/pull.
25 '''
25 '''
26 secretlist = []
26 secretlist = []
27 if excluded:
27 if excluded:
28 for n in excluded:
28 for n in excluded:
29 if n not in repo:
29 if n not in repo:
30 # discovery should not have included the filtered revision,
30 # discovery should not have included the filtered revision,
31 # we have to explicitly exclude it until discovery is cleanup.
31 # we have to explicitly exclude it until discovery is cleanup.
32 continue
32 continue
33 ctx = repo[n]
33 ctx = repo[n]
34 if ctx.phase() >= phases.secret and not ctx.extinct():
34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 secretlist.append(n)
35 secretlist.append(n)
36
36
37 if secretlist:
37 if secretlist:
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 % len(secretlist))
39 % len(secretlist))
40 else:
40 else:
41 ui.status(_("no changes found\n"))
41 ui.status(_("no changes found\n"))
42
42
43 def checknewlabel(repo, lbl, kind):
43 def checknewlabel(repo, lbl, kind):
44 # Do not use the "kind" parameter in ui output.
44 # Do not use the "kind" parameter in ui output.
45 # It makes strings difficult to translate.
45 # It makes strings difficult to translate.
46 if lbl in ['tip', '.', 'null']:
46 if lbl in ['tip', '.', 'null']:
47 raise util.Abort(_("the name '%s' is reserved") % lbl)
47 raise util.Abort(_("the name '%s' is reserved") % lbl)
48 for c in (':', '\0', '\n', '\r'):
48 for c in (':', '\0', '\n', '\r'):
49 if c in lbl:
49 if c in lbl:
50 raise util.Abort(_("%r cannot be used in a name") % c)
50 raise util.Abort(_("%r cannot be used in a name") % c)
51 try:
51 try:
52 int(lbl)
52 int(lbl)
53 raise util.Abort(_("cannot use an integer as a name"))
53 raise util.Abort(_("cannot use an integer as a name"))
54 except ValueError:
54 except ValueError:
55 pass
55 pass
56
56
57 def checkfilename(f):
57 def checkfilename(f):
58 '''Check that the filename f is an acceptable filename for a tracked file'''
58 '''Check that the filename f is an acceptable filename for a tracked file'''
59 if '\r' in f or '\n' in f:
59 if '\r' in f or '\n' in f:
60 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
60 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
61
61
62 def checkportable(ui, f):
62 def checkportable(ui, f):
63 '''Check if filename f is portable and warn or abort depending on config'''
63 '''Check if filename f is portable and warn or abort depending on config'''
64 checkfilename(f)
64 checkfilename(f)
65 abort, warn = checkportabilityalert(ui)
65 abort, warn = checkportabilityalert(ui)
66 if abort or warn:
66 if abort or warn:
67 msg = util.checkwinfilename(f)
67 msg = util.checkwinfilename(f)
68 if msg:
68 if msg:
69 msg = "%s: %r" % (msg, f)
69 msg = "%s: %r" % (msg, f)
70 if abort:
70 if abort:
71 raise util.Abort(msg)
71 raise util.Abort(msg)
72 ui.warn(_("warning: %s\n") % msg)
72 ui.warn(_("warning: %s\n") % msg)
73
73
74 def checkportabilityalert(ui):
74 def checkportabilityalert(ui):
75 '''check if the user's config requests nothing, a warning, or abort for
75 '''check if the user's config requests nothing, a warning, or abort for
76 non-portable filenames'''
76 non-portable filenames'''
77 val = ui.config('ui', 'portablefilenames', 'warn')
77 val = ui.config('ui', 'portablefilenames', 'warn')
78 lval = val.lower()
78 lval = val.lower()
79 bval = util.parsebool(val)
79 bval = util.parsebool(val)
80 abort = os.name == 'nt' or lval == 'abort'
80 abort = os.name == 'nt' or lval == 'abort'
81 warn = bval or lval == 'warn'
81 warn = bval or lval == 'warn'
82 if bval is None and not (warn or abort or lval == 'ignore'):
82 if bval is None and not (warn or abort or lval == 'ignore'):
83 raise error.ConfigError(
83 raise error.ConfigError(
84 _("ui.portablefilenames value is invalid ('%s')") % val)
84 _("ui.portablefilenames value is invalid ('%s')") % val)
85 return abort, warn
85 return abort, warn
86
86
87 class casecollisionauditor(object):
87 class casecollisionauditor(object):
88 def __init__(self, ui, abort, dirstate):
88 def __init__(self, ui, abort, dirstate):
89 self._ui = ui
89 self._ui = ui
90 self._abort = abort
90 self._abort = abort
91 allfiles = '\0'.join(dirstate._map)
91 allfiles = '\0'.join(dirstate._map)
92 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
92 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
93 self._dirstate = dirstate
93 self._dirstate = dirstate
94 # The purpose of _newfiles is so that we don't complain about
94 # The purpose of _newfiles is so that we don't complain about
95 # case collisions if someone were to call this object with the
95 # case collisions if someone were to call this object with the
96 # same filename twice.
96 # same filename twice.
97 self._newfiles = set()
97 self._newfiles = set()
98
98
99 def __call__(self, f):
99 def __call__(self, f):
100 fl = encoding.lower(f)
100 fl = encoding.lower(f)
101 if (fl in self._loweredfiles and f not in self._dirstate and
101 if (fl in self._loweredfiles and f not in self._dirstate and
102 f not in self._newfiles):
102 f not in self._newfiles):
103 msg = _('possible case-folding collision for %s') % f
103 msg = _('possible case-folding collision for %s') % f
104 if self._abort:
104 if self._abort:
105 raise util.Abort(msg)
105 raise util.Abort(msg)
106 self._ui.warn(_("warning: %s\n") % msg)
106 self._ui.warn(_("warning: %s\n") % msg)
107 self._loweredfiles.add(fl)
107 self._loweredfiles.add(fl)
108 self._newfiles.add(f)
108 self._newfiles.add(f)
109
109
110 class pathauditor(object):
110 class pathauditor(object):
111 '''ensure that a filesystem path contains no banned components.
111 '''ensure that a filesystem path contains no banned components.
112 the following properties of a path are checked:
112 the following properties of a path are checked:
113
113
114 - ends with a directory separator
114 - ends with a directory separator
115 - under top-level .hg
115 - under top-level .hg
116 - starts at the root of a windows drive
116 - starts at the root of a windows drive
117 - contains ".."
117 - contains ".."
118 - traverses a symlink (e.g. a/symlink_here/b)
118 - traverses a symlink (e.g. a/symlink_here/b)
119 - inside a nested repository (a callback can be used to approve
119 - inside a nested repository (a callback can be used to approve
120 some nested repositories, e.g., subrepositories)
120 some nested repositories, e.g., subrepositories)
121 '''
121 '''
122
122
123 def __init__(self, root, callback=None):
123 def __init__(self, root, callback=None):
124 self.audited = set()
124 self.audited = set()
125 self.auditeddir = set()
125 self.auditeddir = set()
126 self.root = root
126 self.root = root
127 self.callback = callback
127 self.callback = callback
128 if os.path.lexists(root) and not util.checkcase(root):
128 if os.path.lexists(root) and not util.checkcase(root):
129 self.normcase = util.normcase
129 self.normcase = util.normcase
130 else:
130 else:
131 self.normcase = lambda x: x
131 self.normcase = lambda x: x
132
132
    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)

        Raises util.Abort when the path is malformed, escapes the root,
        traverses a symlink, or lands inside a nested repository.'''

        path = util.localpath(path)
        normpath = self.normcase(path)
        # fast path: this exact path was already vetted
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # cheap substring test above guards the expensive per-part scan
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        # drop the final component; only the ancestor directories are
        # checked for symlinks / nested repos below
        parts.pop()
        normparts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                # this ancestor (and hence all of its ancestors) was
                # already vetted
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    # nested repo: allowed only if the callback approves it
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested "
                                           "repo %r")
                                         % (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
196
196
197 def check(self, path):
197 def check(self, path):
198 try:
198 try:
199 self(path)
199 self(path)
200 return True
200 return True
201 except (OSError, util.Abort):
201 except (OSError, util.Abort):
202 return False
202 return False
203
203
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Subclasses must provide __call__(path, mode, ...) returning a file
    object, and join(path) mapping a relative path to a real one.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            # only swallow "file not found"; propagate real I/O errors
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def open(self, path, mode="r", text=False, atomictemp=False):
        # first call rebinds the instance's open to __call__, so later
        # opens skip this shim entirely
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp)

    def read(self, path):
        # read the whole file in binary mode, always closing it
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        # overwrite path with data (binary), always closing the file
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        # append data (binary) to path, always closing the file
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        # stat an already-open file object (not a path)
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        # l: symlink flag, x: executable flag
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)
283
286
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory against which all relative paths are resolved
        # audit: when True, every accessed path goes through a pathauditor
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None      # mode bits applied to newly created files
        self._trustnlink = None     # cached: is st_nlink reliable here?

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathauditor(self.base)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # True if the filesystem under base supports symbolic links
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # True if the filesystem under base honors the exec bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply self.createmode to a freshly created file; no-op when
        # createmode is unset or exec bits are not supported
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        '''Open path (relative to base), auditing it first when enabled.

        For write modes, hardlinked files are broken up (copied) before
        writing so that COW-shared history files are not corrupted;
        atomictemp requests an atomictempfile instead.'''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink before writing (copy-on-write)
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        # create a symlink at dst pointing to src; falls back to a plain
        # file containing src when symlinks are unsupported
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        # map a relative path to an absolute one under base; an empty
        # path yields base itself
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
396
399
# legacy alias: historical callers refer to the vfs class as "opener"
opener = vfs
398
401
class auditvfs(object):
    '''Mixin that forwards the mustaudit flag to a wrapped vfs.'''

    def __init__(self, vfs):
        # vfs: the wrapped vfs instance all operations delegate to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    # reads/writes pass straight through to the wrapped vfs
    mustaudit = property(_getmustaudit, _setmustaudit)
410
413
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.

    Every path handed to the wrapped vfs is first transformed by the
    filter callable supplied at construction time.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path):
        # empty/None paths bypass the filter and are passed through
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(path))
426
429
# legacy alias matching the "opener" naming convention
filteropener = filtervfs
428
431
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
439
442
440
443
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root

    The result is relative to root and audited; util.Abort is raised
    when myname lies outside root.'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        # resolve relative names against root/cwd
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # simple textual prefix match: strip the root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). The list
        # `rel' holds the reversed list of components making up the relative
        # file name we want.
        rel = []
        while True:
            try:
                s = util.samefile(name, root)
            except OSError:
                s = False
            if s:
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = util.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem top without meeting root
                break
            name = dirname

        raise util.Abort(_("%s not under root '%s'") % (myname, root))
486
489
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the top-level path itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; returns True when it was
            # not seen before (i.e. safe to descend into)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles, so don't follow
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link, sharing seen_dirs to
                        # avoid walking the same directory twice
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
534
537
def osrcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries first, then per-user entries, all normalized
    candidates = systemrcpath()
    candidates.extend(userrcpath())
    return [os.path.normpath(p) for p in candidates]
541
544
# lazily-populated cache for rcpath(); None until the first call
_rcpath = None
543
546
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        # computed once per process and cached in the module global
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    # skip empty entries (e.g. "a::b" or trailing separator)
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # directories contribute all their *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
567
570
def revsingle(repo, revspec, default='.'):
    '''Resolve revspec to a single changectx (the last of its range).

    An empty revspec (other than the integer 0) falls back to default.'''
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec])
    if not resolved:
        raise util.Abort(_('empty revision set'))
    return repo[resolved[-1]]
576
579
def revpair(repo, revs):
    '''Resolve revs to a (node, node-or-None) pair.

    With no revs, the working directory's first parent is returned.  A
    single non-range spec yields (node, None).'''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if not resolved:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    # one result from one spec that wasn't written as a range: no second node
    if len(resolved) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(resolved[0]), None

    return repo.lookup(resolved[0]), repo.lookup(resolved[-1])
592
595
# separator character for old-style revision ranges (e.g. "2:tip")
_revrangesep = ':'
594
597
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # empty spec (but not 0) maps to defval; anything else is
        # resolved through the repo
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # `seen` mirrors `l` for O(1) duplicate checks; it is synced lazily
    # (see the fast path below)
    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            # a previous fast-path iteration deferred syncing seen
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = list(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                # preserve the range's direction in the output order
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, list(repo)) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l
652
655
def expandpats(pats):
    '''Expand bare (kind-less) patterns with glob on platforms where the
    shell does not (util.expandglobs); other patterns pass through.'''
    if not util.expandglobs:
        return list(pats)

    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            # pattern isn't valid glob syntax; treat it literally
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            # nothing matched: keep the original pattern untouched
            expanded.append(pat)
    return expanded
669
672
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a (matcher, expanded pats) pair for ctx.

    Patterns are glob-expanded via expandpats unless globbed is set or
    default is not 'relpath'.  Bad files are reported through the ui
    rather than raising.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    # route unmatched-file complaints to the ui instead of aborting
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats
682
685
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Like matchandpats, but return only the matcher.'''
    matcher, _expanded = matchandpats(ctx, pats, opts, globbed, default)
    return matcher
685
688
def matchall(repo):
    '''Return a matcher that accepts every file in repo.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.always(root, cwd)
688
691
def matchfiles(repo, files):
    '''Return a matcher that matches exactly the given file list.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.exact(root, cwd, files)
691
694
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Schedule unknown files for addition and missing files for removal.

    Files similar enough (per similarity, 0..1) are recorded as renames.
    Returns 1 if any explicitly named file was rejected, else 0.'''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    # collect bad files instead of warning; checked at the end
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # exact matches are only reported in verbose mode
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown, deleted, renames)

    # signal failure if any explicitly requested file was rejected
    for f in rejected:
        if f in m.files():
            return 1
    return 0
726
729
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any of the named files was rejected by the matcher,
    else 0.'''
    m = matchfiles(repo, files)
    rejected = []
    # collect bad files instead of warning; checked at the end
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # unlike addremove, progress is only reported in verbose mode
        unknownset = set(unknown)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown, deleted, renames)

    # signal failure if any explicitly requested file was rejected
    for f in rejected:
        if f in m.files():
            return 1
    return 0
756
759
def _interestingfiles(repo, matcher):
    '''Walk the dirstate with matcher, collecting the files that addremove
    would care about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added = []
    unknown = []
    deleted = []
    removed = []
    auditor = pathauditor(repo.root)

    wctx = repo[None]
    ds = repo.dirstate
    results = ds.walk(matcher, sorted(wctx.substate), True, False,
                      full=False)
    # NOTE: the order of these tests matters; an unaudited unknown file
    # with no stat data must still fall through to the deleted branch
    for path, st in results.iteritems():
        state = ds[path]
        if state == '?' and auditor.check(path):
            unknown.append(path)
        elif state != 'r' and not st:
            deleted.append(path)
        # removed entries are kept so renames can be detected
        elif state == 'r':
            removed.append(path)
        elif state == 'a':
            added.append(path)

    return added, unknown, deleted, removed
783
786
def _findrenames(repo, matcher, added, removed, similarity):
    '''Map each added file to the removed file it was likely renamed from.'''
    if similarity <= 0:
        return {}
    copies = {}
    candidates = similar.findrenames(repo, added, removed, similarity)
    for source, target, score in candidates:
        # stay quiet only when both ends were named explicitly
        bothexact = matcher.exact(source) and matcher.exact(target)
        if repo.ui.verbose or not bothexact:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(source), matcher.rel(target),
                            score * 100))
        copies[target] = source
    return copies
798
801
def _markchanges(repo, unknown, deleted, renames):
    '''Record unknown files as added, deleted files as removed, and each
    rename target as a copy of its source, under the working lock.'''
    ctx = repo[None]
    lock = repo.wlock()
    try:
        ctx.forget(deleted)
        ctx.add(unknown)
        for dst, src in renames.iteritems():
            ctx.copy(src, dst)
    finally:
        lock.release()
811
814
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
    elif ds[origsrc] == 'a' and origsrc == src:
        # src itself was only just added: there is no committed copy
        # source yet, so we can only add dst
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
830
833
def readrequires(opener, supported):
    '''Read and parse .hg/requires via opener and return the requirement
    set, raising RequirementError when an entry is malformed or not in the
    supported feature list.'''
    requirements = set(opener.read("requires").splitlines())
    unsupported = []
    for req in requirements:
        if req in supported:
            continue
        # a blank line or a non-alphanumeric first byte means the file
        # itself is damaged, not merely too new for us
        if not req or not req[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unsupported.append(req)
    if unsupported:
        unsupported.sort()
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(unsupported))
    return requirements
847
850
848 class filecacheentry(object):
851 class filecacheentry(object):
849 def __init__(self, path, stat=True):
852 def __init__(self, path, stat=True):
850 self.path = path
853 self.path = path
851 self.cachestat = None
854 self.cachestat = None
852 self._cacheable = None
855 self._cacheable = None
853
856
854 if stat:
857 if stat:
855 self.cachestat = filecacheentry.stat(self.path)
858 self.cachestat = filecacheentry.stat(self.path)
856
859
857 if self.cachestat:
860 if self.cachestat:
858 self._cacheable = self.cachestat.cacheable()
861 self._cacheable = self.cachestat.cacheable()
859 else:
862 else:
860 # None means we don't know yet
863 # None means we don't know yet
861 self._cacheable = None
864 self._cacheable = None
862
865
863 def refresh(self):
866 def refresh(self):
864 if self.cacheable():
867 if self.cacheable():
865 self.cachestat = filecacheentry.stat(self.path)
868 self.cachestat = filecacheentry.stat(self.path)
866
869
867 def cacheable(self):
870 def cacheable(self):
868 if self._cacheable is not None:
871 if self._cacheable is not None:
869 return self._cacheable
872 return self._cacheable
870
873
871 # we don't know yet, assume it is for now
874 # we don't know yet, assume it is for now
872 return True
875 return True
873
876
874 def changed(self):
877 def changed(self):
875 # no point in going further if we can't cache it
878 # no point in going further if we can't cache it
876 if not self.cacheable():
879 if not self.cacheable():
877 return True
880 return True
878
881
879 newstat = filecacheentry.stat(self.path)
882 newstat = filecacheentry.stat(self.path)
880
883
881 # we may not know if it's cacheable yet, check again now
884 # we may not know if it's cacheable yet, check again now
882 if newstat and self._cacheable is None:
885 if newstat and self._cacheable is None:
883 self._cacheable = newstat.cacheable()
886 self._cacheable = newstat.cacheable()
884
887
885 # check again
888 # check again
886 if not self._cacheable:
889 if not self._cacheable:
887 return True
890 return True
888
891
889 if self.cachestat != newstat:
892 if self.cachestat != newstat:
890 self.cachestat = newstat
893 self.cachestat = newstat
891 return True
894 return True
892 else:
895 else:
893 return False
896 return False
894
897
895 @staticmethod
898 @staticmethod
896 def stat(path):
899 def stat(path):
897 try:
900 try:
898 return util.cachestat(path)
901 return util.cachestat(path)
899 except OSError, e:
902 except OSError, e:
900 if e.errno != errno.ENOENT:
903 if e.errno != errno.ENOENT:
901 raise
904 raise
902
905
class filecache(object):
    '''A property-like decorator that tracks a file under .hg/ for updates.

    The first access stats the backing file and records the stat data in
    the owner's _filecache dict. Later accesses compare fresh stat data with
    the recorded data and rebuild the cached object only when the file has
    changed.

    Mercurial either atomically renames or appends to files under .hg, so
    the cache is only reliable when the filesystem can tell us that a file
    has been replaced. If it can't, we fall back to recreating the object
    on every access (essentially the same behaviour as propertycache).'''

    def __init__(self, path):
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # fast path: the value is already cached on the instance itself
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # rebuild only when the backing file was replaced or rewritten
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            fullpath = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(fullpath)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name in obj._filecache:
            entry = obj._filecache[self.name]
        else:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            entry = filecacheentry(self.join(obj, self.path), False)
            obj._filecache[self.name] = entry

        entry.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
974
977
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if skip is not None and util.safehasattr(map, 'iteritems'):
            # dirstate-style mapping: honour the skip state
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        '''Count one more file under every ancestor directory of path.'''
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # all shallower ancestors are already accounted for
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        '''Drop one file from every ancestor directory of path.'''
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # shallower ancestors keep their counts too
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
1010
1013
# replace the pure Python dirs multiset with the C implementation when the
# compiled parsers module provides one
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
1013
1016
def finddirs(path):
    '''Yield every ancestor directory of a slash-separated path, longest
    prefix first (e.g. 'a/b/c' yields 'a/b' then 'a').'''
    remainder = path
    while True:
        prefix, sep, _tail = remainder.rpartition('/')
        if not sep:
            # no slash left: remainder is the last component
            return
        yield prefix
        remainder = prefix
General Comments 0
You need to be logged in to leave comments. Login now