# Scraped changeset metadata (cleaned up):
#   copies: move file input processing early
#   author: marmoute, revision r43297:041f042a (default branch)
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import (
    attr,
)

from . import (
    encoding,
    error,
    pycompat,
    revlog,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

# Default 'extra' mapping for a changelog entry: every revision implicitly
# belongs to the 'default' branch unless its stored extra says otherwise.
_defaultextra = {'branch': 'default'}
33
33
34 def _string_escape(text):
34 def _string_escape(text):
35 """
35 """
36 >>> from .pycompat import bytechr as chr
36 >>> from .pycompat import bytechr as chr
37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 >>> s
39 >>> s
40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 >>> res = _string_escape(s)
41 >>> res = _string_escape(s)
42 >>> s == _string_unescape(res)
42 >>> s == _string_unescape(res)
43 True
43 True
44 """
44 """
45 # subset of the string_escape codec
45 # subset of the string_escape codec
46 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
46 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
47 return text.replace('\0', '\\0')
47 return text.replace('\0', '\\0')
48
48
def _string_unescape(text):
    """Reverse _string_escape().

    '\\0' needs special care before handing off to the generic unescaper,
    because a literal escaped backslash followed by '0' ('\\\\0') must not
    be mistaken for an escaped NUL.
    """
    if '\\0' in text:
        # fix up \0 without getting into trouble with \\0
        text = text.replace('\\\\', '\\\\\n')
        text = text.replace('\\0', '\0')
        text = text.replace('\n', '')
    return stringutil.unescapestr(text)
56
56
def decodeextra(text):
    """Decode the '\\0'-separated, escaped 'extra' blob into a dict.

    Unknown entries are layered over a copy of ``_defaultextra`` so the
    'branch' key is always present.

    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            # each entry is an escaped 'key:value' pair
            k, v = _string_unescape(l).split(':', 1)
            extra[k] = v
    return extra
74
74
def encodeextra(d):
    """Encode dict *d* as the changelog 'extra' blob (inverse of decodeextra)."""
    # keys must be sorted to produce a deterministic changelog entry
    items = [
        _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
        for k in sorted(d)
    ]
    return "\0".join(items)
82
82
def encodecopies(files, copies):
    """Encode a copy map as newline-joined '<fileindex>\\0<source>' items.

    ``files`` is the changeset's file list; ``copies`` maps a destination
    file (which must appear in ``files``) to its copy source.

    Raises ProgrammingError if some copy destination is absent from
    ``files``.
    """
    items = []
    for i, dst in enumerate(files):
        if dst in copies:
            items.append('%d\0%s' % (i, copies[dst]))
    if len(items) != len(copies):
        raise error.ProgrammingError('some copy targets missing from file list')
    return "\n".join(items)
91
91
def decodecopies(files, data):
    """Decode a copy map produced by encodecopies().

    Returns a dict mapping destination file to copy source, or None when
    the data does not parse (to tolerate colliding extra keys written by
    other tools with different value syntax).
    """
    try:
        copies = {}
        if not data:
            return copies
        for l in data.split('\n'):
            strindex, src = l.split('\0')
            i = int(strindex)
            dst = files[i]
            copies[dst] = src
        return copies
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
        # used different syntax for the value.
        return None
107
107
def encodefileindices(files, subset):
    """Encode *subset* as newline-joined indices into the *files* list.

    Order follows ``files``; entries of ``subset`` not in ``files`` are
    silently dropped.
    """
    subset = set(subset)
    indices = []
    for i, f in enumerate(files):
        if f in subset:
            indices.append('%d' % i)
    return '\n'.join(indices)
115
115
def decodefileindices(files, data):
    """Decode index data produced by encodefileindices() back to file names.

    Returns the list of selected files, or None when the data does not
    parse or contains an out-of-range index (to tolerate colliding extra
    keys written with different value syntax).
    """
    try:
        subset = []
        if not data:
            return subset
        for strindex in data.split('\n'):
            i = int(strindex)
            if i < 0 or i >= len(files):
                return None
            subset.append(files[i])
        return subset
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "added") and
        # used different syntax for the value.
        return None
131
131
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
135
135
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, vfs, name, mode, buf):
        # buf collects written chunks until the transaction finalizes
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        # _end tracks the virtual end: on-disk size plus buffered writes
        self._end = self.size

    def end(self):
        return self._end
    def tell(self):
        return self.offset
    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        # only reposition the real file when the target is inside it;
        # offsets past self.size land in the in-memory buffer
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # remainder comes from the buffered data; collapse the buffer
            # into a single chunk so it can be sliced directly
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
201
201
202 def _divertopener(opener, target):
202 def _divertopener(opener, target):
203 """build an opener that writes in 'target.a' instead of 'target'"""
203 """build an opener that writes in 'target.a' instead of 'target'"""
204 def _divert(name, mode='r', checkambig=False):
204 def _divert(name, mode='r', checkambig=False):
205 if name != target:
205 if name != target:
206 return opener(name, mode)
206 return opener(name, mode)
207 return opener(name + ".a", mode)
207 return opener(name + ".a", mode)
208 return _divert
208 return _divert
209
209
210 def _delayopener(opener, target, buf):
210 def _delayopener(opener, target, buf):
211 """build an opener that stores chunks in 'buf' instead of 'target'"""
211 """build an opener that stores chunks in 'buf' instead of 'target'"""
212 def _delay(name, mode='r', checkambig=False):
212 def _delay(name, mode='r', checkambig=False):
213 if name != target:
213 if name != target:
214 return opener(name, mode)
214 return opener(name, mode)
215 return appender(opener, name, mode, buf)
215 return appender(opener, name, mode, buf)
216 return _delay
216 return _delay
217
217
@attr.s
class _changelogrevision(object):
    """Plain data holder mirroring changelogrevision's fields.

    Returned by changelogrevision() for the empty/null revision.
    """
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default='')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default='')
232
232
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        r'_offsets',
        r'_text',
    )

    def __new__(cls, text):
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index('\n')
        nl2 = text.index('\n', nl1 + 1)
        nl3 = text.index('\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1:nl3 + 2] == '\n':
            doublenl = nl3
        else:
            doublenl = text.index('\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text

        return self

    @property
    def manifest(self):
        return bin(self._text[0:self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1:off[1]])

    @property
    def _rawdate(self):
        # first two space-separated fields of the date line: time, timezone
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        return dateextra.split(' ', 2)[0:2]

    @property
    def _rawextra(self):
        # optional third field of the date line; None when absent
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        fields = dateextra.split(' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1:off[3]].split('\n')

    @property
    def filesadded(self):
        rawindices = self.extra.get('filesadded')
        return rawindices and decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        rawindices = self.extra.get('filesremoved')
        return rawindices and decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        rawcopies = self.extra.get('p1copies')
        return rawcopies and decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        rawcopies = self.extra.get('p2copies')
        return rawcopies and decodecopies(self.files, rawcopies)

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2:])
357
357
358 class changelog(revlog.revlog):
358 class changelog(revlog.revlog):
359 def __init__(self, opener, trypending=False):
359 def __init__(self, opener, trypending=False):
360 """Load a changelog revlog using an opener.
360 """Load a changelog revlog using an opener.
361
361
362 If ``trypending`` is true, we attempt to load the index from a
362 If ``trypending`` is true, we attempt to load the index from a
363 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
363 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
364 The ``00changelog.i.a`` file contains index (and possibly inline
364 The ``00changelog.i.a`` file contains index (and possibly inline
365 revision) data for a transaction that hasn't been finalized yet.
365 revision) data for a transaction that hasn't been finalized yet.
366 It exists in a separate file to facilitate readers (such as
366 It exists in a separate file to facilitate readers (such as
367 hooks processes) accessing data before a transaction is finalized.
367 hooks processes) accessing data before a transaction is finalized.
368 """
368 """
369 if trypending and opener.exists('00changelog.i.a'):
369 if trypending and opener.exists('00changelog.i.a'):
370 indexfile = '00changelog.i.a'
370 indexfile = '00changelog.i.a'
371 else:
371 else:
372 indexfile = '00changelog.i'
372 indexfile = '00changelog.i'
373
373
374 datafile = '00changelog.d'
374 datafile = '00changelog.d'
375 revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
375 revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
376 checkambig=True, mmaplargeindex=True)
376 checkambig=True, mmaplargeindex=True)
377
377
378 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
378 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
379 # changelogs don't benefit from generaldelta.
379 # changelogs don't benefit from generaldelta.
380
380
381 self.version &= ~revlog.FLAG_GENERALDELTA
381 self.version &= ~revlog.FLAG_GENERALDELTA
382 self._generaldelta = False
382 self._generaldelta = False
383
383
384 # Delta chains for changelogs tend to be very small because entries
384 # Delta chains for changelogs tend to be very small because entries
385 # tend to be small and don't delta well with each. So disable delta
385 # tend to be small and don't delta well with each. So disable delta
386 # chains.
386 # chains.
387 self._storedeltachains = False
387 self._storedeltachains = False
388
388
389 self._realopener = opener
389 self._realopener = opener
390 self._delayed = False
390 self._delayed = False
391 self._delaybuf = None
391 self._delaybuf = None
392 self._divert = False
392 self._divert = False
393 self.filteredrevs = frozenset()
393 self.filteredrevs = frozenset()
394 self._copiesstorage = opener.options.get('copies-storage')
394 self._copiesstorage = opener.options.get('copies-storage')
395
395
396 def tiprev(self):
396 def tiprev(self):
397 for i in pycompat.xrange(len(self) -1, -2, -1):
397 for i in pycompat.xrange(len(self) -1, -2, -1):
398 if i not in self.filteredrevs:
398 if i not in self.filteredrevs:
399 return i
399 return i
400
400
401 def tip(self):
401 def tip(self):
402 """filtered version of revlog.tip"""
402 """filtered version of revlog.tip"""
403 return self.node(self.tiprev())
403 return self.node(self.tiprev())
404
404
405 def __contains__(self, rev):
405 def __contains__(self, rev):
406 """filtered version of revlog.__contains__"""
406 """filtered version of revlog.__contains__"""
407 return (0 <= rev < len(self)
407 return (0 <= rev < len(self)
408 and rev not in self.filteredrevs)
408 and rev not in self.filteredrevs)
409
409
410 def __iter__(self):
410 def __iter__(self):
411 """filtered version of revlog.__iter__"""
411 """filtered version of revlog.__iter__"""
412 if len(self.filteredrevs) == 0:
412 if len(self.filteredrevs) == 0:
413 return revlog.revlog.__iter__(self)
413 return revlog.revlog.__iter__(self)
414
414
415 def filterediter():
415 def filterediter():
416 for i in pycompat.xrange(len(self)):
416 for i in pycompat.xrange(len(self)):
417 if i not in self.filteredrevs:
417 if i not in self.filteredrevs:
418 yield i
418 yield i
419
419
420 return filterediter()
420 return filterediter()
421
421
422 def revs(self, start=0, stop=None):
422 def revs(self, start=0, stop=None):
423 """filtered version of revlog.revs"""
423 """filtered version of revlog.revs"""
424 for i in super(changelog, self).revs(start, stop):
424 for i in super(changelog, self).revs(start, stop):
425 if i not in self.filteredrevs:
425 if i not in self.filteredrevs:
426 yield i
426 yield i
427
427
428 def _checknofilteredinrevs(self, revs):
428 def _checknofilteredinrevs(self, revs):
429 """raise the appropriate error if 'revs' contains a filtered revision
429 """raise the appropriate error if 'revs' contains a filtered revision
430
430
431 This returns a version of 'revs' to be used thereafter by the caller.
431 This returns a version of 'revs' to be used thereafter by the caller.
432 In particular, if revs is an iterator, it is converted into a set.
432 In particular, if revs is an iterator, it is converted into a set.
433 """
433 """
434 safehasattr = util.safehasattr
434 safehasattr = util.safehasattr
435 if safehasattr(revs, '__next__'):
435 if safehasattr(revs, '__next__'):
436 # Note that inspect.isgenerator() is not true for iterators,
436 # Note that inspect.isgenerator() is not true for iterators,
437 revs = set(revs)
437 revs = set(revs)
438
438
439 filteredrevs = self.filteredrevs
439 filteredrevs = self.filteredrevs
440 if safehasattr(revs, 'first'): # smartset
440 if safehasattr(revs, 'first'): # smartset
441 offenders = revs & filteredrevs
441 offenders = revs & filteredrevs
442 else:
442 else:
443 offenders = filteredrevs.intersection(revs)
443 offenders = filteredrevs.intersection(revs)
444
444
445 for rev in offenders:
445 for rev in offenders:
446 raise error.FilteredIndexError(rev)
446 raise error.FilteredIndexError(rev)
447 return revs
447 return revs
448
448
449 def headrevs(self, revs=None):
449 def headrevs(self, revs=None):
450 if revs is None and self.filteredrevs:
450 if revs is None and self.filteredrevs:
451 try:
451 try:
452 return self.index.headrevsfiltered(self.filteredrevs)
452 return self.index.headrevsfiltered(self.filteredrevs)
453 # AttributeError covers non-c-extension environments and
453 # AttributeError covers non-c-extension environments and
454 # old c extensions without filter handling.
454 # old c extensions without filter handling.
455 except AttributeError:
455 except AttributeError:
456 return self._headrevs()
456 return self._headrevs()
457
457
458 if self.filteredrevs:
458 if self.filteredrevs:
459 revs = self._checknofilteredinrevs(revs)
459 revs = self._checknofilteredinrevs(revs)
460 return super(changelog, self).headrevs(revs)
460 return super(changelog, self).headrevs(revs)
461
461
462 def strip(self, *args, **kwargs):
462 def strip(self, *args, **kwargs):
463 # XXX make something better than assert
463 # XXX make something better than assert
464 # We can't expect proper strip behavior if we are filtered.
464 # We can't expect proper strip behavior if we are filtered.
465 assert not self.filteredrevs
465 assert not self.filteredrevs
466 super(changelog, self).strip(*args, **kwargs)
466 super(changelog, self).strip(*args, **kwargs)
467
467
468 def rev(self, node):
468 def rev(self, node):
469 """filtered version of revlog.rev"""
469 """filtered version of revlog.rev"""
470 r = super(changelog, self).rev(node)
470 r = super(changelog, self).rev(node)
471 if r in self.filteredrevs:
471 if r in self.filteredrevs:
472 raise error.FilteredLookupError(hex(node), self.indexfile,
472 raise error.FilteredLookupError(hex(node), self.indexfile,
473 _('filtered node'))
473 _('filtered node'))
474 return r
474 return r
475
475
476 def node(self, rev):
476 def node(self, rev):
477 """filtered version of revlog.node"""
477 """filtered version of revlog.node"""
478 if rev in self.filteredrevs:
478 if rev in self.filteredrevs:
479 raise error.FilteredIndexError(rev)
479 raise error.FilteredIndexError(rev)
480 return super(changelog, self).node(rev)
480 return super(changelog, self).node(rev)
481
481
482 def linkrev(self, rev):
482 def linkrev(self, rev):
483 """filtered version of revlog.linkrev"""
483 """filtered version of revlog.linkrev"""
484 if rev in self.filteredrevs:
484 if rev in self.filteredrevs:
485 raise error.FilteredIndexError(rev)
485 raise error.FilteredIndexError(rev)
486 return super(changelog, self).linkrev(rev)
486 return super(changelog, self).linkrev(rev)
487
487
488 def parentrevs(self, rev):
488 def parentrevs(self, rev):
489 """filtered version of revlog.parentrevs"""
489 """filtered version of revlog.parentrevs"""
490 if rev in self.filteredrevs:
490 if rev in self.filteredrevs:
491 raise error.FilteredIndexError(rev)
491 raise error.FilteredIndexError(rev)
492 return super(changelog, self).parentrevs(rev)
492 return super(changelog, self).parentrevs(rev)
493
493
494 def flags(self, rev):
494 def flags(self, rev):
495 """filtered version of revlog.flags"""
495 """filtered version of revlog.flags"""
496 if rev in self.filteredrevs:
496 if rev in self.filteredrevs:
497 raise error.FilteredIndexError(rev)
497 raise error.FilteredIndexError(rev)
498 return super(changelog, self).flags(rev)
498 return super(changelog, self).flags(rev)
499
499
500 def delayupdate(self, tr):
500 def delayupdate(self, tr):
501 "delay visibility of index updates to other readers"
501 "delay visibility of index updates to other readers"
502
502
503 if not self._delayed:
503 if not self._delayed:
504 if len(self) == 0:
504 if len(self) == 0:
505 self._divert = True
505 self._divert = True
506 if self._realopener.exists(self.indexfile + '.a'):
506 if self._realopener.exists(self.indexfile + '.a'):
507 self._realopener.unlink(self.indexfile + '.a')
507 self._realopener.unlink(self.indexfile + '.a')
508 self.opener = _divertopener(self._realopener, self.indexfile)
508 self.opener = _divertopener(self._realopener, self.indexfile)
509 else:
509 else:
510 self._delaybuf = []
510 self._delaybuf = []
511 self.opener = _delayopener(self._realopener, self.indexfile,
511 self.opener = _delayopener(self._realopener, self.indexfile,
512 self._delaybuf)
512 self._delaybuf)
513 self._delayed = True
513 self._delayed = True
514 tr.addpending('cl-%i' % id(self), self._writepending)
514 tr.addpending('cl-%i' % id(self), self._writepending)
515 tr.addfinalize('cl-%i' % id(self), self._finalize)
515 tr.addfinalize('cl-%i' % id(self), self._finalize)
516
516
517 def _finalize(self, tr):
517 def _finalize(self, tr):
518 "finalize index updates"
518 "finalize index updates"
519 self._delayed = False
519 self._delayed = False
520 self.opener = self._realopener
520 self.opener = self._realopener
521 # move redirected index data back into place
521 # move redirected index data back into place
522 if self._divert:
522 if self._divert:
523 assert not self._delaybuf
523 assert not self._delaybuf
524 tmpname = self.indexfile + ".a"
524 tmpname = self.indexfile + ".a"
525 nfile = self.opener.open(tmpname)
525 nfile = self.opener.open(tmpname)
526 nfile.close()
526 nfile.close()
527 self.opener.rename(tmpname, self.indexfile, checkambig=True)
527 self.opener.rename(tmpname, self.indexfile, checkambig=True)
528 elif self._delaybuf:
528 elif self._delaybuf:
529 fp = self.opener(self.indexfile, 'a', checkambig=True)
529 fp = self.opener(self.indexfile, 'a', checkambig=True)
530 fp.write("".join(self._delaybuf))
530 fp.write("".join(self._delaybuf))
531 fp.close()
531 fp.close()
532 self._delaybuf = None
532 self._delaybuf = None
533 self._divert = False
533 self._divert = False
534 # split when we're done
534 # split when we're done
535 self._enforceinlinesize(tr)
535 self._enforceinlinesize(tr)
536
536
# NOTE(review): exposes the not-yet-finalized changelog state to hooks
# (pretxnchangegroup) by materializing it into "<indexfile>.a". Returns True
# when a pending file exists (so callers know to point readers at it),
# False when there is nothing pending.
537 def _writepending(self, tr):
537 def _writepending(self, tr):
538 "create a file containing the unfinalized state for pretxnchangegroup"
538 "create a file containing the unfinalized state for pretxnchangegroup"
539 if self._delaybuf:
539 if self._delaybuf:
540 # make a temporary copy of the index
540 # make a temporary copy of the index
541 fp1 = self._realopener(self.indexfile)
541 fp1 = self._realopener(self.indexfile)
542 pendingfilename = self.indexfile + ".a"
542 pendingfilename = self.indexfile + ".a"
543 # register as a temp file to ensure cleanup on failure
543 # register as a temp file to ensure cleanup on failure
544 tr.registertmp(pendingfilename)
544 tr.registertmp(pendingfilename)
545 # write existing data
545 # write existing data
546 fp2 = self._realopener(pendingfilename, "w")
546 fp2 = self._realopener(pendingfilename, "w")
# NOTE(review): whole-index read into memory; fp1/fp2 are not closed
# explicitly here — presumably relying on refcounting. TODO confirm.
547 fp2.write(fp1.read())
547 fp2.write(fp1.read())
548 # add pending data
548 # add pending data
549 fp2.write("".join(self._delaybuf))
549 fp2.write("".join(self._delaybuf))
550 fp2.close()
550 fp2.close()
551 # switch modes so finalize can simply rename
551 # switch modes so finalize can simply rename
552 self._delaybuf = None
552 self._delaybuf = None
553 self._divert = True
553 self._divert = True
# further writes now go straight to the ".a" file via the divert opener
554 self.opener = _divertopener(self._realopener, self.indexfile)
554 self.opener = _divertopener(self._realopener, self.indexfile)
555
555
556 if self._divert:
556 if self._divert:
557 return True
557 return True
558
558
# no pending data was buffered and we are not in divert mode
559 return False
559 return False
560
560
# NOTE(review): while writes are delayed (self._delayed True) the inline-size
# check is suppressed — splitting the revlog mid-delay would conflict with the
# buffered/diverted index data. _finalize() re-invokes this once done.
561 def _enforceinlinesize(self, tr, fp=None):
561 def _enforceinlinesize(self, tr, fp=None):
562 if not self._delayed:
562 if not self._delayed:
563 revlog.revlog._enforceinlinesize(self, tr, fp)
563 revlog.revlog._enforceinlinesize(self, tr, fp)
564
564
# NOTE(review): legacy full-tuple accessor; parses the whole revision text
# even when the caller needs a single field — hence the docstring's advice
# to prefer changelogrevision() for partial access.
565 def read(self, node):
565 def read(self, node):
566 """Obtain data from a parsed changelog revision.
566 """Obtain data from a parsed changelog revision.
567
567
568 Returns a 6-tuple of:
568 Returns a 6-tuple of:
569
569
570 - manifest node in binary
570 - manifest node in binary
571 - author/user as a localstr
571 - author/user as a localstr
572 - date as a 2-tuple of (time, timezone)
572 - date as a 2-tuple of (time, timezone)
573 - list of files
573 - list of files
574 - commit message as a localstr
574 - commit message as a localstr
575 - dict of extra metadata
575 - dict of extra metadata
576
576
577 Unless you need to access all fields, consider calling
577 Unless you need to access all fields, consider calling
578 ``changelogrevision`` instead, as it is faster for partial object
578 ``changelogrevision`` instead, as it is faster for partial object
579 access.
579 access.
580 """
580 """
581 c = changelogrevision(self.revision(node))
581 c = changelogrevision(self.revision(node))
582 return (
582 return (
583 c.manifest,
583 c.manifest,
584 c.user,
584 c.user,
585 c.date,
585 c.date,
586 c.files,
586 c.files,
587 c.description,
587 c.description,
588 c.extra
588 c.extra
589 )
589 )
590
590
# NOTE(review): thin wrapper — accepts either a binary node or an integer
# revision (whatever self.revision() accepts) and wraps the raw text in a
# lazily-parsed changelogrevision object.
591 def changelogrevision(self, nodeorrev):
591 def changelogrevision(self, nodeorrev):
592 """Obtain a ``changelogrevision`` for a node or revision."""
592 """Obtain a ``changelogrevision`` for a node or revision."""
593 return changelogrevision(self.revision(nodeorrev))
593 return changelogrevision(self.revision(nodeorrev))
594
594
# NOTE(review): fast path that avoids full changelogrevision parsing. Relies
# on the changelog entry layout visible in add(): line 0 = manifest hex,
# line 1 = user, line 2 = date(+extra), lines 3..N = touched files, then a
# blank line separating the header from the description — hence l[3:].
595 def readfiles(self, node):
595 def readfiles(self, node):
596 """
596 """
597 short version of read that only returns the files modified by the cset
597 short version of read that only returns the files modified by the cset
598 """
598 """
599 text = self.revision(node)
599 text = self.revision(node)
600 if not text:
600 if not text:
601 return []
601 return []
# "\n\n" marks the end of the header block (empty line before the
# description); only the header is split, the description is skipped.
602 last = text.index("\n\n")
602 last = text.index("\n\n")
603 l = text[:last].split('\n')
603 l = text[:last].split('\n')
604 return l[3:]
604 return l[3:]
605
605
# NOTE(review): builds and stores a new changelog entry. The diff hunk below
# (gutter 641-648 on the new side) is this changeset's actual change: the
# copy-tracing inputs (p1copies/p2copies/filesadded/filesremoved) are now
# encoded into their string/index form *early*, right after sortedfiles is
# computed, instead of inline at each 'extra' assignment.
606 def add(self, manifest, files, desc, transaction, p1, p2,
606 def add(self, manifest, files, desc, transaction, p1, p2,
607 user, date=None, extra=None, p1copies=None, p2copies=None,
607 user, date=None, extra=None, p1copies=None, p2copies=None,
608 filesadded=None, filesremoved=None):
608 filesadded=None, filesremoved=None):
609 # Convert to UTF-8 encoded bytestrings as the very first
609 # Convert to UTF-8 encoded bytestrings as the very first
610 # thing: calling any method on a localstr object will turn it
610 # thing: calling any method on a localstr object will turn it
611 # into a str object and the cached UTF-8 string is thus lost.
611 # into a str object and the cached UTF-8 string is thus lost.
612 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
612 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
613
613
614 user = user.strip()
614 user = user.strip()
615 # An empty username or a username with a "\n" will make the
615 # An empty username or a username with a "\n" will make the
616 # revision text contain two "\n\n" sequences -> corrupt
616 # revision text contain two "\n\n" sequences -> corrupt
617 # repository since read cannot unpack the revision.
617 # repository since read cannot unpack the revision.
618 if not user:
618 if not user:
619 raise error.StorageError(_("empty username"))
619 raise error.StorageError(_("empty username"))
620 if "\n" in user:
620 if "\n" in user:
621 raise error.StorageError(_("username %r contains a newline")
621 raise error.StorageError(_("username %r contains a newline")
622 % pycompat.bytestr(user))
622 % pycompat.bytestr(user))
623
623
624 desc = stripdesc(desc)
624 desc = stripdesc(desc)
625
625
626 if date:
626 if date:
627 parseddate = "%d %d" % dateutil.parsedate(date)
627 parseddate = "%d %d" % dateutil.parsedate(date)
628 else:
628 else:
629 parseddate = "%d %d" % dateutil.makedate()
629 parseddate = "%d %d" % dateutil.makedate()
630 if extra:
630 if extra:
631 branch = extra.get("branch")
631 branch = extra.get("branch")
632 if branch in ("default", ""):
632 if branch in ("default", ""):
# the default branch is stored implicitly (see _defaultextra)
633 del extra["branch"]
633 del extra["branch"]
634 elif branch in (".", "null", "tip"):
634 elif branch in (".", "null", "tip"):
635 raise error.StorageError(_('the name \'%s\' is reserved')
635 raise error.StorageError(_('the name \'%s\' is reserved')
636 % branch)
636 % branch)
637 sortedfiles = sorted(files)
637 sortedfiles = sorted(files)
638 if extra is not None:
638 if extra is not None:
# caller-supplied extra must not smuggle in copy metadata keys;
# they are owned by this method (set below when storage is 'extra')
639 for name in ('p1copies', 'p2copies', 'filesadded', 'filesremoved'):
639 for name in ('p1copies', 'p2copies', 'filesadded', 'filesremoved'):
640 extra.pop(name, None)
640 extra.pop(name, None)
# -- new side of the diff: encode copy metadata up front, against the
# sorted file list, so later consumers get the already-encoded form --
641 if p1copies is not None:
642 p1copies = encodecopies(sortedfiles, p1copies)
643 if p2copies is not None:
644 p2copies = encodecopies(sortedfiles, p2copies)
645 if filesadded is not None:
646 filesadded = encodefileindices(sortedfiles, filesadded)
647 if filesremoved is not None:
648 filesremoved = encodefileindices(sortedfiles, filesremoved)
641 if self._copiesstorage == 'extra':
649 if self._copiesstorage == 'extra':
642 extrasentries = p1copies, p2copies, filesadded, filesremoved
650 extrasentries = p1copies, p2copies, filesadded, filesremoved
643 if extra is None and any(x is not None for x in extrasentries):
651 if extra is None and any(x is not None for x in extrasentries):
644 extra = {}
652 extra = {}
# old side encoded inline; new side stores the pre-encoded values
645 if p1copies is not None:
653 if p1copies is not None:
646 extra['p1copies'] = encodecopies(sortedfiles, p1copies)
654 extra['p1copies'] = p1copies
647 if p2copies is not None:
655 if p2copies is not None:
648 extra['p2copies'] = encodecopies(sortedfiles, p2copies)
656 extra['p2copies'] = p2copies
649 if filesadded is not None:
657 if filesadded is not None:
650 extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
658 extra['filesadded'] = filesadded
651 if filesremoved is not None:
659 if filesremoved is not None:
652 extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)
660 extra['filesremoved'] = filesremoved
653
661
654 if extra:
662 if extra:
# extra is folded into the date line, escaped by encodeextra()
655 extra = encodeextra(extra)
663 extra = encodeextra(extra)
656 parseddate = "%s %s" % (parseddate, extra)
664 parseddate = "%s %s" % (parseddate, extra)
# entry layout: manifest hex, user, date(+extra), files..., "", description
# (readfiles() above depends on this exact layout)
657 l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
665 l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
658 text = "\n".join(l)
666 text = "\n".join(l)
659 return self.addrevision(text, transaction, len(self), p1, p2)
667 return self.addrevision(text, transaction, len(self), p1, p2)
660
668
# NOTE(review): index 5 of read()'s 6-tuple is the extra dict (see read()
# docstring); 'close' in extra marks a branch-closing changeset.
661 def branchinfo(self, rev):
669 def branchinfo(self, rev):
662 """return the branch name and open/close state of a revision
670 """return the branch name and open/close state of a revision
663
671
664 This function exists because creating a changectx object
672 This function exists because creating a changectx object
665 just to access this is costly."""
673 just to access this is costly."""
666 extra = self.read(rev)[5]
674 extra = self.read(rev)[5]
# extra.get("branch") is never None here because _defaultextra supplies
# 'default' — presumably guaranteed by the parsing path; TODO confirm.
667 return encoding.tolocal(extra.get("branch")), 'close' in extra
675 return encoding.tolocal(extra.get("branch")), 'close' in extra
668
676
# NOTE(review): hook invoked when addrevision() encounters a node that is
# already stored; records its rev in transaction.changes['revduplicates'].
669 def _nodeduplicatecallback(self, transaction, node):
677 def _nodeduplicatecallback(self, transaction, node):
670 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
678 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
671 #
679 #
672 # We track them in a list to preserve their order from the source bundle
680 # We track them in a list to preserve their order from the source bundle
673 duplicates = transaction.changes.setdefault('revduplicates', [])
681 duplicates = transaction.changes.setdefault('revduplicates', [])
674 duplicates.append(self.rev(node))
682 duplicates.append(self.rev(node))
General Comments 0
You need to be logged in to leave comments. Login now