##// END OF EJS Templates
changelog: fix the diverted opener to accept more kwargs...
marmoute -
r44507:7f67f534 default
parent child Browse files
Show More
@@ -1,625 +1,626
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullid,
14 nullid,
15 )
15 )
16 from .thirdparty import attr
16 from .thirdparty import attr
17
17
18 from . import (
18 from . import (
19 copies,
19 copies,
20 encoding,
20 encoding,
21 error,
21 error,
22 pycompat,
22 pycompat,
23 revlog,
23 revlog,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 dateutil,
26 dateutil,
27 stringutil,
27 stringutil,
28 )
28 )
29
29
30 from .revlogutils import sidedata as sidedatamod
30 from .revlogutils import sidedata as sidedatamod
31
31
# Fallback 'extra' mapping used when a revision records no extra metadata;
# decodeextra() copies it, so the branch defaults to b'default'.
# NOTE(review): extensions might mutate this dict (see _changelogrevision).
_defaultextra = {b'branch': b'default'}
33
33
34
34
35 def _string_escape(text):
35 def _string_escape(text):
36 """
36 """
37 >>> from .pycompat import bytechr as chr
37 >>> from .pycompat import bytechr as chr
38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s
40 >>> s
41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 >>> res = _string_escape(s)
42 >>> res = _string_escape(s)
43 >>> s == _string_unescape(res)
43 >>> s == _string_unescape(res)
44 True
44 True
45 """
45 """
46 # subset of the string_escape codec
46 # subset of the string_escape codec
47 text = (
47 text = (
48 text.replace(b'\\', b'\\\\')
48 text.replace(b'\\', b'\\\\')
49 .replace(b'\n', b'\\n')
49 .replace(b'\n', b'\\n')
50 .replace(b'\r', b'\\r')
50 .replace(b'\r', b'\\r')
51 )
51 )
52 return text.replace(b'\0', b'\\0')
52 return text.replace(b'\0', b'\\0')
53
53
54
54
def _string_unescape(text):
    """Reverse ``_string_escape``.

    ``\\0`` needs special handling before delegating to
    ``stringutil.unescapestr``, which does not know about it.
    """
    if b'\\0' in text:
        # Temporarily tag every literal double-backslash with a newline so
        # that replacing '\\0' cannot accidentally match inside '\\\\0';
        # the marker newlines are stripped again afterwards.
        text = (
            text.replace(b'\\\\', b'\\\\\n')
            .replace(b'\\0', b'\0')
            .replace(b'\n', b'')
        )
    return stringutil.unescapestr(text)
62
62
63
63
def decodeextra(text):
    """Decode an encoded 'extra' blob into a dict.

    The input is a b'\\0'-separated list of escaped ``key:value`` pairs as
    produced by ``encodeextra``. Entries are layered on top of a copy of
    ``_defaultextra``, so a missing branch decodes as b'default'.
    """
    extra = _defaultextra.copy()
    for chunk in text.split(b'\0'):
        if not chunk:
            continue
        key, value = _string_unescape(chunk).split(b':', 1)
        extra[key] = value
    return extra
81
81
82
82
def encodeextra(d):
    """Encode an 'extra' dict into the b'\\0'-separated on-disk form.

    Keys are sorted so the same mapping always produces the same bytes,
    which keeps changelog entries deterministic.
    """
    return b"\0".join(
        _string_escape(b'%s:%s' % (key, pycompat.bytestr(d[key])))
        for key in sorted(d)
    )
90
90
91
91
def stripdesc(desc):
    """Normalize a commit description.

    Trailing whitespace is removed from every line, then leading and
    trailing empty lines are dropped.
    """
    stripped_lines = (line.rstrip() for line in desc.splitlines())
    return b'\n'.join(stripped_lines).strip(b'\n')
95
95
96
96
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''

    def __init__(self, vfs, name, mode, buf):
        # buf collects the delayed writes; it is shared with the caller
        # (see _delayopener) so the pending data can be flushed later.
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        # virtual position; may point past the real file into self.data
        self.offset = fp.tell()
        # size of the real on-disk portion; everything beyond lives in buf
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        # virtual end: real file size plus everything buffered so far
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        # intentionally a no-op: buffered data must not hit disk yet
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        # only reposition the real file when the virtual offset falls
        # within it; offsets past self.size are served from self.data
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            # first satisfy as much as possible from the real file
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # remainder comes from the buffered writes; collapse the
            # buffer into a single chunk so it can be sliced directly
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        # writes never touch the real file; they only grow the buffer
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
165
165
166
166
167 def _divertopener(opener, target):
167 def _divertopener(opener, target):
168 """build an opener that writes in 'target.a' instead of 'target'"""
168 """build an opener that writes in 'target.a' instead of 'target'"""
169
169
170 def _divert(name, mode=b'r', checkambig=False):
170 def _divert(name, mode=b'r', checkambig=False, **kwargs):
171 if name != target:
171 if name != target:
172 return opener(name, mode)
172 return opener(name, mode, **kwargs)
173 return opener(name + b".a", mode)
173 return opener(name + b".a", mode, **kwargs)
174
174
175 return _divert
175 return _divert
176
176
177
177
178 def _delayopener(opener, target, buf):
178 def _delayopener(opener, target, buf):
179 """build an opener that stores chunks in 'buf' instead of 'target'"""
179 """build an opener that stores chunks in 'buf' instead of 'target'"""
180
180
181 def _delay(name, mode=b'r', checkambig=False):
181 def _delay(name, mode=b'r', checkambig=False, **kwargs):
182 if name != target:
182 if name != target:
183 return opener(name, mode)
183 return opener(name, mode, **kwargs)
184 assert not kwargs
184 return appender(opener, name, mode, buf)
185 return appender(opener, name, mode, buf)
185
186
186 return _delay
187 return _delay
187
188
188
189
@attr.s
class _changelogrevision(object):
    """Plain value object returned for an empty/absent changelog revision.

    Mirrors the attribute surface of ``changelogrevision`` so callers can
    treat both uniformly.
    """

    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    # manifest node (binary); nullid for the null revision
    manifest = attr.ib(default=nullid)
    user = attr.ib(default=b'')
    # (time, timezone) pair, matching changelogrevision.date
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    # copy-tracing fields default to None meaning "not recorded"
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')
203
204
204
205
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
    )

    def __new__(cls, text, sidedata, cpsd):
        # An empty text means the null revision; hand back the lightweight
        # default object instead of parsing.
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        # Record only the newline offsets; fields are sliced lazily in the
        # property accessors below.
        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        # whether copy metadata is stored in sidedata ('changeset-sidedata')
        self._cpsd = cpsd

        return self

    @property
    def manifest(self):
        # first line: manifest node in hex -> binary
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        # second line: author, converted back to the local encoding
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        # first two space-separated fields of the third line: time and tz
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        # optional third field of the third line: encoded extra, or None
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        # lines between the date line and the double newline; empty when
        # the two offsets coincide
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        # sidedata wins when copies are stored there; [] means "recorded,
        # none added" while None means "not recorded at all"
        if self._cpsd:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
            if not rawindices:
                return []
        else:
            rawindices = self.extra.get(b'filesadded')
        if rawindices is None:
            return None
        return copies.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        # see filesadded for the None vs [] distinction
        if self._cpsd:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
            if not rawindices:
                return []
        else:
            rawindices = self.extra.get(b'filesremoved')
        if rawindices is None:
            return None
        return copies.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        # {} means "recorded, no copies"; None means "not recorded"
        if self._cpsd:
            rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
            if not rawcopies:
                return {}
        else:
            rawcopies = self.extra.get(b'p1copies')
        if rawcopies is None:
            return None
        return copies.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        # see p1copies for the None vs {} distinction
        if self._cpsd:
            rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
            if not rawcopies:
                return {}
        else:
            rawcopies = self.extra.get(b'p2copies')
        if rawcopies is None:
            return None
        return copies.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        # everything after the double newline, in the local encoding
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])
361
362
362
363
class changelog(revlog.revlog):
    """Revlog subclass for the changelog (00changelog.i).

    Adds delayed/diverted writes so the index only becomes visible to
    other readers once a transaction is finalized.
    """

    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists(b'00changelog.i.a'):
            indexfile = b'00changelog.i.a'
        else:
            indexfile = b'00changelog.i'

        datafile = b'00changelog.d'
        revlog.revlog.__init__(
            self,
            opener,
            indexfile,
            datafile=datafile,
            checkambig=True,
            mmaplargeindex=True,
        )

        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each. So disable delta
        # chains.
        self._storedeltachains = False

        # the undiverted/undelayed opener; self.opener may be swapped by
        # delayupdate()/_writepending() below
        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()
        self._copiesstorage = opener.options.get(b'copies-storage')

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""

        if not self._delayed:
            if len(self) == 0:
                # brand new changelog: divert writes to indexfile + '.a'
                self._divert = True
                if self._realopener.exists(self.indexfile + b'.a'):
                    self._realopener.unlink(self.indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                # existing changelog: buffer index writes in memory
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self.indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            # append the buffered data to the real index
            fp = self.opener(self.indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, b"w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write(b"".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr, fp=None):
        # while writes are delayed, postpone splitting the inline revlog;
        # _finalize() calls this again once the delay is over
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

           - manifest node in binary
           - author/user as a localstr
           - date as a 2-tuple of (time, timezone)
           - list of files
           - commit message as a localstr
           - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d, s = self._revisiondata(node)
        c = changelogrevision(
            d, s, self._copiesstorage == b'changeset-sidedata'
        )
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        # the files are the 4th and later lines before the double newline
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
        p1copies=None,
        p2copies=None,
        filesadded=None,
        filesremoved=None,
    ):
        """Add a new revision to the changelog and return its node.

        ``manifest`` is the binary manifest node; ``files`` the files
        touched by the changeset; ``desc``/``user`` are localstr values.
        Copy metadata (``p1copies`` .. ``filesremoved``) is stored in
        extra or sidedata depending on the 'copies-storage' option.
        """
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                # the default branch is implicit; don't store it
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files)
        sidedata = None
        if extra is not None:
            # copy metadata keys are managed below; never trust caller-set
            # values in extra
            for name in (
                b'p1copies',
                b'p2copies',
                b'filesadded',
                b'filesremoved',
            ):
                extra.pop(name, None)
        if p1copies is not None:
            p1copies = copies.encodecopies(sortedfiles, p1copies)
        if p2copies is not None:
            p2copies = copies.encodecopies(sortedfiles, p2copies)
        if filesadded is not None:
            filesadded = copies.encodefileindices(sortedfiles, filesadded)
        if filesremoved is not None:
            filesremoved = copies.encodefileindices(sortedfiles, filesremoved)
        if self._copiesstorage == b'extra':
            extrasentries = p1copies, p2copies, filesadded, filesremoved
            if extra is None and any(x is not None for x in extrasentries):
                extra = {}
            if p1copies is not None:
                extra[b'p1copies'] = p1copies
            if p2copies is not None:
                extra[b'p2copies'] = p2copies
            if filesadded is not None:
                extra[b'filesadded'] = filesadded
            if filesremoved is not None:
                extra[b'filesremoved'] = filesremoved
        elif self._copiesstorage == b'changeset-sidedata':
            sidedata = {}
            if p1copies:
                sidedata[sidedatamod.SD_P1COPIES] = p1copies
            if p2copies:
                sidedata[sidedatamod.SD_P2COPIES] = p2copies
            if filesadded:
                sidedata[sidedatamod.SD_FILESADDED] = filesadded
            if filesremoved:
                sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
            if not sidedata:
                sidedata = None

        if extra:
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        # assemble the revision text in the format parsed by
        # changelogrevision.__new__ above
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        return self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata
        )

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra

    def _nodeduplicatecallback(self, transaction, node):
        # keep track of revisions that got "re-added", eg: unbunde of know rev.
        #
        # We track them in a list to preserve their order from the source bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(self.rev(node))
General Comments 0
You need to be logged in to leave comments. Login now