changelog-delay: move the delay/divert logic inside the (inner) revlog...

Author: marmoute
Changeset: r51999:d83d7885 (branch: default)
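The patch deletes the changelog-level `_delayopener`/`_divertopener` machinery and replaces it with calls into the inner revlog (`self._inner.delay()`, `self._inner.write_pending()`, `self._inner.finalize_pending()`). For orientation, here is a minimal sketch of how a transaction ends up driving these entry points. The hook registrations are taken from the diff below; `_example_commit_flow` itself is a hypothetical helper written for illustration, not part of the patch:

# Hypothetical illustration of the call sequence; 'tr' is a Mercurial
# transaction and 'cl' a changelog object. Only the hook registrations
# mentioned in the comments are taken from the patch itself.

def _example_commit_flow(cl, tr):
    # Hide in-progress index writes from concurrent readers. This call
    # registers cl._writepending and cl._finalize on the transaction
    # (via tr.addpending and tr.addfinalize, as shown in the diff).
    cl.delayupdate(tr)

    # ... new revisions are appended to the changelog here ...

    # Before pretxn* hooks run, the transaction invokes
    # cl._writepending(tr) so hook processes can read the pending data
    # (e.g. through a 00changelog.i.a file). When the transaction
    # closes, cl._finalize(tr) makes the new data visible to everyone.

The point of the refactoring is that only the registration and bookkeeping stay in changelog.py; the file-level delay/divert trickery moves behind the `_InnerRevlog` boundary, which exists (per its docstring below) so that such operations can be delegated to Rust.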
--- a/mercurial/changelog.py
+++ b/mercurial/changelog.py
@@ -1,579 +1,500 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 from .i18n import _
 from .node import (
     bin,
     hex,
 )
 from .thirdparty import attr

 from . import (
     encoding,
     error,
     metadata,
     pycompat,
     revlog,
 )
 from .utils import (
     dateutil,
     stringutil,
 )
 from .revlogutils import (
     constants as revlog_constants,
     flagutil,
-    randomaccessfile,
 )

 _defaultextra = {b'branch': b'default'}


 def _string_escape(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
     >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
     >>> s
     'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
     >>> s == _string_unescape(res)
     True
     """
     # subset of the string_escape codec
     text = (
         text.replace(b'\\', b'\\\\')
         .replace(b'\n', b'\\n')
         .replace(b'\r', b'\\r')
     )
     return text.replace(b'\0', b'\\0')


 def _string_unescape(text):
     if b'\\0' in text:
         # fix up \0 without getting into trouble with \\0
         text = text.replace(b'\\\\', b'\\\\\n')
         text = text.replace(b'\\0', b'\0')
         text = text.replace(b'\n', b'')
     return stringutil.unescapestr(text)


 def decodeextra(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
     ...                                 b'baz': chr(92) + chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
     for l in text.split(b'\0'):
         if l:
             k, v = _string_unescape(l).split(b':', 1)
             extra[k] = v
     return extra


 def encodeextra(d):
     # keys must be sorted to produce a deterministic changelog entry
     items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
     return b"\0".join(items)


 def stripdesc(desc):
     """strip trailing whitespace and leading and trailing empty lines"""
     return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


-class _divertopener:
-    def __init__(self, opener, target):
-        self._opener = opener
-        self._target = target
-
-    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
-        if name != self._target:
-            return self._opener(name, mode, **kwargs)
-        return self._opener(name + b".a", mode, **kwargs)
-
-    def __getattr__(self, attr):
-        return getattr(self._opener, attr)
-
-
-class _delayopener:
-    """build an opener that stores chunks in 'buf' instead of 'target'"""
-
-    def __init__(self, opener, target, buf):
-        self._opener = opener
-        self._target = target
-        self._buf = buf
-
-    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
-        if name != self._target:
-            return self._opener(name, mode, **kwargs)
-        assert not kwargs
-        return randomaccessfile.appender(self._opener, name, mode, self._buf)
-
-    def __getattr__(self, attr):
-        return getattr(self._opener, attr)
-
-
 @attr.s
 class _changelogrevision:
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
     manifest = attr.ib()
     user = attr.ib(default=b'')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
     filesadded = attr.ib(default=None)
     filesremoved = attr.ib(default=None)
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
     description = attr.ib(default=b'')
     branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))


 class changelogrevision:
     """Holds results of a parsed changelog revision.

     Changelog revisions consist of multiple pieces of data, including
     the manifest node, user, and date. This object exposes a view into
     the parsed object.
     """

     __slots__ = (
         '_offsets',
         '_text',
         '_sidedata',
         '_cpsd',
         '_changes',
     )

     def __new__(cls, cl, text, sidedata, cpsd):
         if not text:
             return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)

         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
         # But doing it here is equivalent and saves an extra function call.

         # format used:
         # nodeid\n        : manifest node in ascii
         # user\n          : user, no \n or \r allowed
         # time tz extra\n : date (time is int or float, timezone is int)
         #                 : extra is metadata, encoded and separated by '\0'
         #                 : older versions ignore it
         # files\n\n       : files modified by the cset, no \n or \r allowed
         # (.*)            : comment (free text, ideally utf-8)
         #
         # changelog v0 doesn't use extra

         nl1 = text.index(b'\n')
         nl2 = text.index(b'\n', nl1 + 1)
         nl3 = text.index(b'\n', nl2 + 1)

         # The list of files may be empty. Which means nl3 is the first of the
         # double newline that precedes the description.
         if text[nl3 + 1 : nl3 + 2] == b'\n':
             doublenl = nl3
         else:
             doublenl = text.index(b'\n\n', nl3 + 1)

         self._offsets = (nl1, nl2, nl3, doublenl)
         self._text = text
         self._sidedata = sidedata
         self._cpsd = cpsd
         self._changes = None

         return self

     @property
     def manifest(self):
         return bin(self._text[0 : self._offsets[0]])

     @property
     def user(self):
         off = self._offsets
         return encoding.tolocal(self._text[off[0] + 1 : off[1]])

     @property
     def _rawdate(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1 : off[2]]
         return dateextra.split(b' ', 2)[0:2]

     @property
     def _rawextra(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1 : off[2]]
         fields = dateextra.split(b' ', 2)
         if len(fields) != 3:
             return None

         return fields[2]

     @property
     def date(self):
         raw = self._rawdate
         time = float(raw[0])
         # Various tools did silly things with the timezone.
         try:
             timezone = int(raw[1])
         except ValueError:
             timezone = 0

         return time, timezone

     @property
     def extra(self):
         raw = self._rawextra
         if raw is None:
             return _defaultextra

         return decodeextra(raw)

     @property
     def changes(self):
         if self._changes is not None:
             return self._changes
         if self._cpsd:
             changes = metadata.decode_files_sidedata(self._sidedata)
         else:
             changes = metadata.ChangingFiles(
                 touched=self.files or (),
                 added=self.filesadded or (),
                 removed=self.filesremoved or (),
                 p1_copies=self.p1copies or {},
                 p2_copies=self.p2copies or {},
             )
         self._changes = changes
         return changes

     @property
     def files(self):
         if self._cpsd:
             return sorted(self.changes.touched)
         off = self._offsets
         if off[2] == off[3]:
             return []

         return self._text[off[2] + 1 : off[3]].split(b'\n')

     @property
     def filesadded(self):
         if self._cpsd:
             return self.changes.added
         else:
             rawindices = self.extra.get(b'filesadded')
             if rawindices is None:
                 return None
             return metadata.decodefileindices(self.files, rawindices)

     @property
     def filesremoved(self):
         if self._cpsd:
             return self.changes.removed
         else:
             rawindices = self.extra.get(b'filesremoved')
             if rawindices is None:
                 return None
             return metadata.decodefileindices(self.files, rawindices)

     @property
     def p1copies(self):
         if self._cpsd:
             return self.changes.copied_from_p1
         else:
             rawcopies = self.extra.get(b'p1copies')
             if rawcopies is None:
                 return None
             return metadata.decodecopies(self.files, rawcopies)

     @property
     def p2copies(self):
         if self._cpsd:
             return self.changes.copied_from_p2
         else:
             rawcopies = self.extra.get(b'p2copies')
             if rawcopies is None:
                 return None
             return metadata.decodecopies(self.files, rawcopies)

     @property
     def description(self):
         return encoding.tolocal(self._text[self._offsets[3] + 2 :])

     @property
     def branchinfo(self):
         extra = self.extra
         return encoding.tolocal(extra.get(b"branch")), b'close' in extra


 class changelog(revlog.revlog):
     def __init__(self, opener, trypending=False, concurrencychecker=None):
         """Load a changelog revlog using an opener.

         If ``trypending`` is true, we attempt to load the index from a
         ``00changelog.i.a`` file instead of the default ``00changelog.i``.
         The ``00changelog.i.a`` file contains index (and possibly inline
         revision) data for a transaction that hasn't been finalized yet.
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.

         ``concurrencychecker`` will be passed to the revlog init function, see
         the documentation there.
         """
         revlog.revlog.__init__(
             self,
             opener,
             target=(revlog_constants.KIND_CHANGELOG, None),
             radix=b'00changelog',
             checkambig=True,
             mmaplargeindex=True,
             persistentnodemap=opener.options.get(b'persistent-nodemap', False),
             concurrencychecker=concurrencychecker,
             trypending=trypending,
         )

         if self._initempty and (self._format_version == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.

             self._format_flags &= ~revlog.FLAG_GENERALDELTA
             self.delta_config.general_delta = False

         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each. So disable delta
         # chains.
         self._storedeltachains = False

-        self._realopener = opener
-        self._delayed = False
-        self._delaybuf = None
-        self._divert = False
+        self._v2_delayed = False
         self._filteredrevs = frozenset()
         self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')

     @property
     def filteredrevs(self):
         return self._filteredrevs

     @filteredrevs.setter
     def filteredrevs(self, val):
         # Ensure all updates go through this function
         assert isinstance(val, frozenset)
         self._filteredrevs = val
         self._filteredrevs_hashcache = {}

     def _write_docket(self, tr):
-        if not self.is_delaying:
+        if not self._v2_delayed:
             super(changelog, self)._write_docket(tr)

-    @property
-    def is_delaying(self):
-        return self._delayed
-
     def delayupdate(self, tr):
         """delay visibility of index updates to other readers"""
         assert not self._inner.is_open
-        if self._docket is None and not self.is_delaying:
-            if len(self) == 0:
-                self._divert = True
-                if self._realopener.exists(self._indexfile + b'.a'):
-                    self._realopener.unlink(self._indexfile + b'.a')
-                self.opener = _divertopener(self._realopener, self._indexfile)
-            else:
-                self._delaybuf = []
-                self.opener = _delayopener(
-                    self._realopener, self._indexfile, self._delaybuf
-                )
-            self._inner.opener = self.opener
-            self._inner._segmentfile.opener = self.opener
-            self._inner._segmentfile_sidedata.opener = self.opener
-        self._delayed = True
+        if self._docket is not None:
+            self._v2_delayed = True
+        else:
+            new_index = self._inner.delay()
+            if new_index is not None:
+                self._indexfile = new_index
+                tr.registertmp(new_index)
         tr.addpending(b'cl-%i' % id(self), self._writepending)
         tr.addfinalize(b'cl-%i' % id(self), self._finalize)

     def _finalize(self, tr):
         """finalize index updates"""
         assert not self._inner.is_open
-        self._delayed = False
-        self.opener = self._realopener
-        self._inner.opener = self.opener
-        self._inner._segmentfile.opener = self.opener
-        self._inner._segmentfile_sidedata.opener = self.opener
-        # move redirected index data back into place
         if self._docket is not None:
-            self._write_docket(tr)
-        elif self._divert:
-            assert not self._delaybuf
-            tmpname = self._indexfile + b".a"
-            nfile = self.opener.open(tmpname)
-            nfile.close()
-            self.opener.rename(tmpname, self._indexfile, checkambig=True)
-        elif self._delaybuf:
-            fp = self.opener(self._indexfile, b'a', checkambig=True)
-            fp.write(b"".join(self._delaybuf))
-            fp.close()
-            self._delaybuf = None
-        self._divert = False
-        # split when we're done
-        self._enforceinlinesize(tr, side_write=False)
+            self._docket.write(tr)
+            self._v2_delayed = False
+        else:
+            new_index_file = self._inner.finalize_pending()
+            self._indexfile = new_index_file
+            # split when we're done
+            self._enforceinlinesize(tr, side_write=False)

     def _writepending(self, tr):
         """create a file containing the unfinalized state for
         pretxnchangegroup"""
         assert not self._inner.is_open
         if self._docket:
-            return self._docket.write(tr, pending=True)
-        if self._delaybuf:
-            # make a temporary copy of the index
-            fp1 = self._realopener(self._indexfile)
-            pendingfilename = self._indexfile + b".a"
-            # register as a temp file to ensure cleanup on failure
-            tr.registertmp(pendingfilename)
-            # write existing data
-            fp2 = self._realopener(pendingfilename, b"w")
-            fp2.write(fp1.read())
-            # add pending data
-            fp2.write(b"".join(self._delaybuf))
-            fp2.close()
-            # switch modes so finalize can simply rename
-            self._delaybuf = None
-            self._divert = True
-            self.opener = _divertopener(self._realopener, self._indexfile)
-            self._inner.opener = self.opener
-            self._inner._segmentfile.opener = self.opener
-            self._inner._segmentfile_sidedata.opener = self.opener
-
-        if self._divert:
-            return True
-
-        return False
+            any_pending = self._docket.write(tr, pending=True)
+            self._v2_delayed = False
+        else:
+            new_index, any_pending = self._inner.write_pending()
+            if new_index is not None:
+                self._indexfile = new_index
+                tr.registertmp(new_index)
+        return any_pending

     def _enforceinlinesize(self, tr, side_write=True):
         if not self.is_delaying:
             revlog.revlog._enforceinlinesize(self, tr, side_write=side_write)

     def read(self, nodeorrev):
         """Obtain data from a parsed changelog revision.

         Returns a 6-tuple of:

         - manifest node in binary
         - author/user as a localstr
         - date as a 2-tuple of (time, timezone)
         - list of files
         - commit message as a localstr
         - dict of extra metadata

         Unless you need to access all fields, consider calling
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
         d = self._revisiondata(nodeorrev)
         sidedata = self.sidedata(nodeorrev)
         copy_sd = self._copiesstorage == b'changeset-sidedata'
         c = changelogrevision(self, d, sidedata, copy_sd)
         return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

     def changelogrevision(self, nodeorrev):
         """Obtain a ``changelogrevision`` for a node or revision."""
         text = self._revisiondata(nodeorrev)
         sidedata = self.sidedata(nodeorrev)
         return changelogrevision(
             self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
         )

     def readfiles(self, nodeorrev):
         """
         short version of read that only returns the files modified by the cset
         """
         text = self.revision(nodeorrev)
         if not text:
             return []
         last = text.index(b"\n\n")
         l = text[:last].split(b'\n')
         return l[3:]

     def add(
         self,
         manifest,
         files,
         desc,
         transaction,
         p1,
         p2,
         user,
         date=None,
         extra=None,
     ):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
         # into a str object and the cached UTF-8 string is thus lost.
         user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

         user = user.strip()
         # An empty username or a username with a "\n" will make the
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
             raise error.StorageError(_(b"empty username"))
         if b"\n" in user:
             raise error.StorageError(
                 _(b"username %r contains a newline") % pycompat.bytestr(user)
             )

         desc = stripdesc(desc)

         if date:
             parseddate = b"%d %d" % dateutil.parsedate(date)
         else:
             parseddate = b"%d %d" % dateutil.makedate()
         if extra:
             branch = extra.get(b"branch")
             if branch in (b"default", b""):
                 del extra[b"branch"]
             elif branch in (b".", b"null", b"tip"):
                 raise error.StorageError(
                     _(b'the name \'%s\' is reserved') % branch
                 )
         sortedfiles = sorted(files.touched)
         flags = 0
         sidedata = None
         if self._copiesstorage == b'changeset-sidedata':
             if files.has_copies_info:
                 flags |= flagutil.REVIDX_HASCOPIESINFO
             sidedata = metadata.encode_files_sidedata(files)

         if extra:
             extra = encodeextra(extra)
             parseddate = b"%s %s" % (parseddate, extra)
         l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
         text = b"\n".join(l)
         rev = self.addrevision(
             text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
         )
         return self.node(rev)

     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision

         This function exists because creating a changectx object
         just to access this is costly."""
         return self.changelogrevision(rev).branchinfo

     def _nodeduplicatecallback(self, transaction, rev):
         # keep track of revisions that got "re-added", eg: unbunde of know rev.
         #
         # We track them in a list to preserve their order from the source bundle
         duplicates = transaction.changes.setdefault(b'revduplicates', [])
         duplicates.append(rev)
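The branch that disappears from `delayupdate` above used to choose between two strategies at the changelog level. Condensed into a standalone sketch for reference (this mirrors the removed lines; after the patch an equivalent choice lives behind `_InnerRevlog.delay()`):

# Condensed from the removed changelog code above; not code that
# survives anywhere after this patch.

def _old_delay_strategy(cl):
    if len(cl) == 0:
        # Empty changelog: divert every write to '00changelog.i.a'
        # (via _divertopener) and rename it into place at finalize time.
        cl._divert = True
    else:
        # Changelog already has data: buffer appended chunks in memory
        # (via _delayopener) and copy them out in _writepending or
        # append them to the real index in _finalize.
        cl._delaybuf = []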
--- a/mercurial/repocache.py
+++ b/mercurial/repocache.py
@@ -1,138 +1,138 @@
 # repocache.py - in-memory repository cache for long-running services
 #
 # Copyright 2018 Yuya Nishihara <yuya@tcha.org>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 import collections
 import gc
 import threading

 from . import (
     error,
     hg,
     obsolete,
     scmutil,
     util,
 )


 class repoloader:
     """Load repositories in background thread

     This is designed for a forking server. A cached repo cannot be obtained
     until the server fork()s a worker and the loader thread stops.
     """

     def __init__(self, ui, maxlen):
         self._ui = ui.copy()
         self._cache = util.lrucachedict(max=maxlen)
         # use deque and Event instead of Queue since deque can discard
         # old items to keep at most maxlen items.
         self._inqueue = collections.deque(maxlen=maxlen)
         self._accepting = False
         self._newentry = threading.Event()
         self._thread = None

     def start(self):
         assert not self._thread
         if self._inqueue.maxlen == 0:
             # no need to spawn loader thread as the cache is disabled
             return
         self._accepting = True
         self._thread = threading.Thread(target=self._mainloop)
         self._thread.start()

     def stop(self):
         if not self._thread:
             return
         self._accepting = False
         self._newentry.set()
         self._thread.join()
         self._thread = None
         self._cache.clear()
         self._inqueue.clear()

     def load(self, path):
         """Request to load the specified repository in background"""
         self._inqueue.append(path)
         self._newentry.set()

     def get(self, path):
         """Return a cached repo if available

         This function must be called after fork(), where the loader thread
         is stopped. Otherwise, the returned repo might be updated by the
         loader thread.
         """
         if self._thread and self._thread.is_alive():
             raise error.ProgrammingError(
                 b'cannot obtain cached repo while loader is active'
             )
         return self._cache.peek(path, None)

     def _mainloop(self):
         while self._accepting:
             # Avoid heavy GC after fork(), which would cancel the benefit of
             # COW. We assume that GIL is acquired while GC is underway in the
             # loader thread. If that isn't true, we might have to move
             # gc.collect() to the main thread so that fork() would never stop
             # the thread where GC is in progress.
             gc.collect()

             self._newentry.wait()
             while self._accepting:
                 self._newentry.clear()
                 try:
                     path = self._inqueue.popleft()
                 except IndexError:
                     break
                 scmutil.callcatch(self._ui, lambda: self._load(path))

     def _load(self, path):
         start = util.timer()
         # TODO: repo should be recreated if storage configuration changed
         try:
             # pop before loading so inconsistent state wouldn't be exposed
             repo = self._cache.pop(path)
         except KeyError:
             repo = hg.repository(self._ui, path).unfiltered()
         _warmupcache(repo)
         repo.ui.log(
             b'repocache',
             b'loaded repo into cache: %s (in %.3fs)\n',
             path,
             util.timer() - start,
         )
         self._cache.insert(path, repo)


 # TODO: think about proper API of preloading cache
 def _warmupcache(repo):
     repo.invalidateall()
     repo.changelog
     repo.obsstore._all
     repo.obsstore.successors
     repo.obsstore.predecessors
     repo.obsstore.children
     for name in obsolete.cachefuncs:
         obsolete.getrevs(repo, name)
     repo._phasecache.loadphaserevs(repo)


 # TODO: think about proper API of attaching preloaded attributes
 def copycache(srcrepo, destrepo):
     """Copy cached attributes from srcrepo to destrepo"""
     destfilecache = destrepo._filecache
     srcfilecache = srcrepo._filecache
     if b'changelog' in srcfilecache:
         destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
-        ce.obj.opener = ce.obj._realopener = destrepo.svfs
+        ce.obj.opener = ce.obj._inner.opener = destrepo.svfs
     if b'obsstore' in srcfilecache:
         destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
         ce.obj.svfs = destrepo.svfs
     if b'_phasecache' in srcfilecache:
         destfilecache[b'_phasecache'] = ce = srcfilecache[b'_phasecache']
         ce.obj.opener = destrepo.svfs
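The one-line repocache.py change follows directly from the attribute move: the cached changelog no longer carries a `_realopener`, so `copycache` must re-point the inner revlog's opener instead. A sketch of the invariant it preserves (`_rewire_changelog` is a hypothetical name for illustration; the real code does this inline with a chained assignment):

# Every opener reachable from a cached changelog handed to another
# repository object must reference the destination repo's store vfs.

def _rewire_changelog(entry, destrepo):
    entry.obj.opener = destrepo.svfs         # opener on the revlog itself
    entry.obj._inner.opener = destrepo.svfs  # opener on the inner layer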
@@ -1,4053 +1,4170 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 # coding: utf8
2 # coding: utf8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Storage back-end for Mercurial.
9 """Storage back-end for Mercurial.
10
10
11 This provides efficient delta storage with O(1) retrieve and append
11 This provides efficient delta storage with O(1) retrieve and append
12 and O(changes) merge between branches.
12 and O(changes) merge between branches.
13 """
13 """
14
14
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import weakref
22 import weakref
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .revlogutils.constants import (
35 from .revlogutils.constants import (
36 ALL_KINDS,
36 ALL_KINDS,
37 CHANGELOGV2,
37 CHANGELOGV2,
38 COMP_MODE_DEFAULT,
38 COMP_MODE_DEFAULT,
39 COMP_MODE_INLINE,
39 COMP_MODE_INLINE,
40 COMP_MODE_PLAIN,
40 COMP_MODE_PLAIN,
41 DELTA_BASE_REUSE_NO,
41 DELTA_BASE_REUSE_NO,
42 DELTA_BASE_REUSE_TRY,
42 DELTA_BASE_REUSE_TRY,
43 ENTRY_RANK,
43 ENTRY_RANK,
44 FEATURES_BY_VERSION,
44 FEATURES_BY_VERSION,
45 FLAG_GENERALDELTA,
45 FLAG_GENERALDELTA,
46 FLAG_INLINE_DATA,
46 FLAG_INLINE_DATA,
47 INDEX_HEADER,
47 INDEX_HEADER,
48 KIND_CHANGELOG,
48 KIND_CHANGELOG,
49 KIND_FILELOG,
49 KIND_FILELOG,
50 RANK_UNKNOWN,
50 RANK_UNKNOWN,
51 REVLOGV0,
51 REVLOGV0,
52 REVLOGV1,
52 REVLOGV1,
53 REVLOGV1_FLAGS,
53 REVLOGV1_FLAGS,
54 REVLOGV2,
54 REVLOGV2,
55 REVLOGV2_FLAGS,
55 REVLOGV2_FLAGS,
56 REVLOG_DEFAULT_FLAGS,
56 REVLOG_DEFAULT_FLAGS,
57 REVLOG_DEFAULT_FORMAT,
57 REVLOG_DEFAULT_FORMAT,
58 REVLOG_DEFAULT_VERSION,
58 REVLOG_DEFAULT_VERSION,
59 SUPPORTED_FLAGS,
59 SUPPORTED_FLAGS,
60 )
60 )
61 from .revlogutils.flagutil import (
61 from .revlogutils.flagutil import (
62 REVIDX_DEFAULT_FLAGS,
62 REVIDX_DEFAULT_FLAGS,
63 REVIDX_ELLIPSIS,
63 REVIDX_ELLIPSIS,
64 REVIDX_EXTSTORED,
64 REVIDX_EXTSTORED,
65 REVIDX_FLAGS_ORDER,
65 REVIDX_FLAGS_ORDER,
66 REVIDX_HASCOPIESINFO,
66 REVIDX_HASCOPIESINFO,
67 REVIDX_ISCENSORED,
67 REVIDX_ISCENSORED,
68 REVIDX_RAWTEXT_CHANGING_FLAGS,
68 REVIDX_RAWTEXT_CHANGING_FLAGS,
69 )
69 )
70 from .thirdparty import attr
70 from .thirdparty import attr
71 from . import (
71 from . import (
72 ancestor,
72 ancestor,
73 dagop,
73 dagop,
74 error,
74 error,
75 mdiff,
75 mdiff,
76 policy,
76 policy,
77 pycompat,
77 pycompat,
78 revlogutils,
78 revlogutils,
79 templatefilters,
79 templatefilters,
80 util,
80 util,
81 )
81 )
82 from .interfaces import (
82 from .interfaces import (
83 repository,
83 repository,
84 util as interfaceutil,
84 util as interfaceutil,
85 )
85 )
86 from .revlogutils import (
86 from .revlogutils import (
87 deltas as deltautil,
87 deltas as deltautil,
88 docket as docketutil,
88 docket as docketutil,
89 flagutil,
89 flagutil,
90 nodemap as nodemaputil,
90 nodemap as nodemaputil,
91 randomaccessfile,
91 randomaccessfile,
92 revlogv0,
92 revlogv0,
93 rewrite,
93 rewrite,
94 sidedata as sidedatautil,
94 sidedata as sidedatautil,
95 )
95 )
96 from .utils import (
96 from .utils import (
97 storageutil,
97 storageutil,
98 stringutil,
98 stringutil,
99 )
99 )
100
100
101 # blanked usage of all the name to prevent pyflakes constraints
101 # blanked usage of all the name to prevent pyflakes constraints
102 # We need these name available in the module for extensions.
102 # We need these name available in the module for extensions.
103
103
104 REVLOGV0
104 REVLOGV0
105 REVLOGV1
105 REVLOGV1
106 REVLOGV2
106 REVLOGV2
107 CHANGELOGV2
107 CHANGELOGV2
108 FLAG_INLINE_DATA
108 FLAG_INLINE_DATA
109 FLAG_GENERALDELTA
109 FLAG_GENERALDELTA
110 REVLOG_DEFAULT_FLAGS
110 REVLOG_DEFAULT_FLAGS
111 REVLOG_DEFAULT_FORMAT
111 REVLOG_DEFAULT_FORMAT
112 REVLOG_DEFAULT_VERSION
112 REVLOG_DEFAULT_VERSION
113 REVLOGV1_FLAGS
113 REVLOGV1_FLAGS
114 REVLOGV2_FLAGS
114 REVLOGV2_FLAGS
115 REVIDX_ISCENSORED
115 REVIDX_ISCENSORED
116 REVIDX_ELLIPSIS
116 REVIDX_ELLIPSIS
117 REVIDX_HASCOPIESINFO
117 REVIDX_HASCOPIESINFO
118 REVIDX_EXTSTORED
118 REVIDX_EXTSTORED
119 REVIDX_DEFAULT_FLAGS
119 REVIDX_DEFAULT_FLAGS
120 REVIDX_FLAGS_ORDER
120 REVIDX_FLAGS_ORDER
121 REVIDX_RAWTEXT_CHANGING_FLAGS
121 REVIDX_RAWTEXT_CHANGING_FLAGS
122
122
123 parsers = policy.importmod('parsers')
123 parsers = policy.importmod('parsers')
124 rustancestor = policy.importrust('ancestor')
124 rustancestor = policy.importrust('ancestor')
125 rustdagop = policy.importrust('dagop')
125 rustdagop = policy.importrust('dagop')
126 rustrevlog = policy.importrust('revlog')
126 rustrevlog = policy.importrust('revlog')
127
127
128 # Aliased for performance.
128 # Aliased for performance.
129 _zlibdecompress = zlib.decompress
129 _zlibdecompress = zlib.decompress
130
130
131 # max size of inline data embedded into a revlog
131 # max size of inline data embedded into a revlog
132 _maxinline = 131072
132 _maxinline = 131072
133
133
134 # Flag processors for REVIDX_ELLIPSIS.
134 # Flag processors for REVIDX_ELLIPSIS.
135 def ellipsisreadprocessor(rl, text):
135 def ellipsisreadprocessor(rl, text):
136 return text, False
136 return text, False
137
137
138
138
139 def ellipsiswriteprocessor(rl, text):
139 def ellipsiswriteprocessor(rl, text):
140 return text, False
140 return text, False
141
141
142
142
143 def ellipsisrawprocessor(rl, text):
143 def ellipsisrawprocessor(rl, text):
144 return False
144 return False
145
145
146
146
147 ellipsisprocessor = (
147 ellipsisprocessor = (
148 ellipsisreadprocessor,
148 ellipsisreadprocessor,
149 ellipsiswriteprocessor,
149 ellipsiswriteprocessor,
150 ellipsisrawprocessor,
150 ellipsisrawprocessor,
151 )
151 )
152
152
153
153
154 def _verify_revision(rl, skipflags, state, node):
154 def _verify_revision(rl, skipflags, state, node):
155 """Verify the integrity of the given revlog ``node`` while providing a hook
155 """Verify the integrity of the given revlog ``node`` while providing a hook
156 point for extensions to influence the operation."""
156 point for extensions to influence the operation."""
157 if skipflags:
157 if skipflags:
158 state[b'skipread'].add(node)
158 state[b'skipread'].add(node)
159 else:
159 else:
160 # Side-effect: read content and verify hash.
160 # Side-effect: read content and verify hash.
161 rl.revision(node)
161 rl.revision(node)
162
162
163
163
164 # True if a fast implementation for persistent-nodemap is available
164 # True if a fast implementation for persistent-nodemap is available
165 #
165 #
166 # We also consider we have a "fast" implementation in "pure" python because
166 # We also consider we have a "fast" implementation in "pure" python because
167 # people using pure don't really have performance consideration (and a
167 # people using pure don't really have performance consideration (and a
168 # wheelbarrow of other slowness source)
168 # wheelbarrow of other slowness source)
169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or hasattr(
169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or hasattr(
170 parsers, 'BaseIndexObject'
170 parsers, 'BaseIndexObject'
171 )
171 )
172
172
173
173
174 @interfaceutil.implementer(repository.irevisiondelta)
174 @interfaceutil.implementer(repository.irevisiondelta)
175 @attr.s(slots=True)
175 @attr.s(slots=True)
176 class revlogrevisiondelta:
176 class revlogrevisiondelta:
177 node = attr.ib()
177 node = attr.ib()
178 p1node = attr.ib()
178 p1node = attr.ib()
179 p2node = attr.ib()
179 p2node = attr.ib()
180 basenode = attr.ib()
180 basenode = attr.ib()
181 flags = attr.ib()
181 flags = attr.ib()
182 baserevisionsize = attr.ib()
182 baserevisionsize = attr.ib()
183 revision = attr.ib()
183 revision = attr.ib()
184 delta = attr.ib()
184 delta = attr.ib()
185 sidedata = attr.ib()
185 sidedata = attr.ib()
186 protocol_flags = attr.ib()
186 protocol_flags = attr.ib()
187 linknode = attr.ib(default=None)
187 linknode = attr.ib(default=None)
188
188
189
189
190 @interfaceutil.implementer(repository.iverifyproblem)
190 @interfaceutil.implementer(repository.iverifyproblem)
191 @attr.s(frozen=True)
191 @attr.s(frozen=True)
192 class revlogproblem:
192 class revlogproblem:
193 warning = attr.ib(default=None)
193 warning = attr.ib(default=None)
194 error = attr.ib(default=None)
194 error = attr.ib(default=None)
195 node = attr.ib(default=None)
195 node = attr.ib(default=None)
196
196
197
197
198 def parse_index_v1(data, inline):
198 def parse_index_v1(data, inline):
199 # call the C implementation to parse the index data
199 # call the C implementation to parse the index data
200 index, cache = parsers.parse_index2(data, inline)
200 index, cache = parsers.parse_index2(data, inline)
201 return index, cache
201 return index, cache
202
202
203
203
204 def parse_index_v2(data, inline):
204 def parse_index_v2(data, inline):
205 # call the C implementation to parse the index data
205 # call the C implementation to parse the index data
206 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
206 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
207 return index, cache
207 return index, cache
208
208
209
209
210 def parse_index_cl_v2(data, inline):
210 def parse_index_cl_v2(data, inline):
211 # call the C implementation to parse the index data
211 # call the C implementation to parse the index data
212 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
212 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
213 return index, cache
213 return index, cache
214
214
215
215
216 if hasattr(parsers, 'parse_index_devel_nodemap'):
216 if hasattr(parsers, 'parse_index_devel_nodemap'):
217
217
218 def parse_index_v1_nodemap(data, inline):
218 def parse_index_v1_nodemap(data, inline):
219 index, cache = parsers.parse_index_devel_nodemap(data, inline)
219 index, cache = parsers.parse_index_devel_nodemap(data, inline)
220 return index, cache
220 return index, cache
221
221
222
222
223 else:
223 else:
224 parse_index_v1_nodemap = None
224 parse_index_v1_nodemap = None
225
225
226
226
227 def parse_index_v1_mixed(data, inline):
227 def parse_index_v1_mixed(data, inline):
228 index, cache = parse_index_v1(data, inline)
228 index, cache = parse_index_v1(data, inline)
229 return rustrevlog.MixedIndex(index), cache
229 return rustrevlog.MixedIndex(index), cache
230
230
231
231
232 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
232 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
233 # signed integer)
233 # signed integer)
234 _maxentrysize = 0x7FFFFFFF
234 _maxentrysize = 0x7FFFFFFF
235
235
236 FILE_TOO_SHORT_MSG = _(
236 FILE_TOO_SHORT_MSG = _(
237 b'cannot read from revlog %s;'
237 b'cannot read from revlog %s;'
238 b' expected %d bytes from offset %d, data size is %d'
238 b' expected %d bytes from offset %d, data size is %d'
239 )
239 )
240
240
241 hexdigits = b'0123456789abcdefABCDEF'
241 hexdigits = b'0123456789abcdefABCDEF'
242
242
243
243
244 class _Config:
244 class _Config:
245 def copy(self):
245 def copy(self):
246 return self.__class__(**self.__dict__)
246 return self.__class__(**self.__dict__)
247
247
248
248
249 @attr.s()
249 @attr.s()
250 class FeatureConfig(_Config):
250 class FeatureConfig(_Config):
251 """Hold configuration values about the available revlog features"""
251 """Hold configuration values about the available revlog features"""
252
252
253 # the default compression engine
253 # the default compression engine
254 compression_engine = attr.ib(default=b'zlib')
254 compression_engine = attr.ib(default=b'zlib')
255 # compression engines options
255 # compression engines options
256 compression_engine_options = attr.ib(default=attr.Factory(dict))
256 compression_engine_options = attr.ib(default=attr.Factory(dict))
257
257
258 # can we use censor on this revlog
258 # can we use censor on this revlog
259 censorable = attr.ib(default=False)
259 censorable = attr.ib(default=False)
260 # does this revlog use the "side data" feature
260 # does this revlog use the "side data" feature
261 has_side_data = attr.ib(default=False)
261 has_side_data = attr.ib(default=False)
262 # might remove rank configuration once the computation has no impact
262 # might remove rank configuration once the computation has no impact
263 compute_rank = attr.ib(default=False)
263 compute_rank = attr.ib(default=False)
264 # parent order is supposed to be semantically irrelevant, so we
264 # parent order is supposed to be semantically irrelevant, so we
265 # normally resort parents to ensure that the first parent is non-null,
265 # normally resort parents to ensure that the first parent is non-null,
266 # if there is a non-null parent at all.
266 # if there is a non-null parent at all.
267 # filelog abuses the parent order as flag to mark some instances of
267 # filelog abuses the parent order as flag to mark some instances of
268 # meta-encoded files, so allow it to disable this behavior.
268 # meta-encoded files, so allow it to disable this behavior.
269 canonical_parent_order = attr.ib(default=False)
269 canonical_parent_order = attr.ib(default=False)
270 # can ellipsis commit be used
270 # can ellipsis commit be used
271 enable_ellipsis = attr.ib(default=False)
271 enable_ellipsis = attr.ib(default=False)
272
272
273 def copy(self):
273 def copy(self):
274 new = super().copy()
274 new = super().copy()
275 new.compression_engine_options = self.compression_engine_options.copy()
275 new.compression_engine_options = self.compression_engine_options.copy()
276 return new
276 return new
277
277
278
278
@attr.s()
class DataConfig(_Config):
    """Hold configuration values about how the revlog data are read"""

    # should we try to open the "pending" version of the revlog
    try_pending = attr.ib(default=False)
    # should we try to open the "split" version of the revlog
    try_split = attr.ib(default=False)
    # When True, indexfile should be opened with checkambig=True at writing,
    # to avoid file stat ambiguity.
    check_ambig = attr.ib(default=False)

    # If true, use mmap instead of reading to deal with a large index
    mmap_large_index = attr.ib(default=False)
    # how much data is considered large
    mmap_index_threshold = attr.ib(default=None)
    # How much data to read and cache into the raw revlog data cache.
    chunk_cache_size = attr.ib(default=65536)

    # Allow sparse reading of the revlog data
    with_sparse_read = attr.ib(default=False)
    # minimal density of a sparse read chunk
    sr_density_threshold = attr.ib(default=0.50)
    # minimal size of the data we skip when performing sparse reads
    sr_min_gap_size = attr.ib(default=262144)

    # are deltas encoded against arbitrary bases
    generaldelta = attr.ib(default=False)


@attr.s()
class DeltaConfig(_Config):
    """Hold configuration values about how new deltas are computed

    Some attributes are duplicated from DataConfig to help having each object
    self-contained.
    """

    # can deltas be encoded against arbitrary bases
    general_delta = attr.ib(default=False)
    # Allow sparse writing of the revlog data
    sparse_revlog = attr.ib(default=False)
    # maximum length of a delta chain
    max_chain_len = attr.ib(default=None)
    # Maximum distance between a delta chain's base start and its end
    max_deltachain_span = attr.ib(default=-1)
    # If `upper_bound_comp` is not None, this is the expected maximal gain from
    # compression for the data content.
    upper_bound_comp = attr.ib(default=None)
    # Should we try a delta against both parents
    delta_both_parents = attr.ib(default=True)
    # Test delta base candidate groups by chunks of this maximal size.
    candidate_group_chunk_size = attr.ib(default=0)
    # Should we display debug information about delta computation
    debug_delta = attr.ib(default=False)
    # trust incoming deltas by default
    lazy_delta = attr.ib(default=True)
    # trust the base of incoming deltas by default
    lazy_delta_base = attr.ib(default=False)


class _InnerRevlog:
    """An inner layer of the revlog object

    That layer exists to be able to delegate some operations to Rust; its
    boundaries are arbitrary and based on what we can delegate to Rust.
    """

    def __init__(
        self,
        opener,
        index,
        index_file,
        data_file,
        sidedata_file,
        inline,
        data_config,
        delta_config,
        feature_config,
        chunk_cache,
        default_compression_header,
    ):
        self.opener = opener
        self.index = index

        self.__index_file = index_file
        self.data_file = data_file
        self.sidedata_file = sidedata_file
        self.inline = inline
        self.data_config = data_config
        self.delta_config = delta_config
        self.feature_config = feature_config

        # used during diverted write.
        self._orig_index_file = None

        self._default_compression_header = default_compression_header

        # index

        # 3-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._segmentfile = randomaccessfile.randomaccessfile(
            self.opener,
            (self.index_file if self.inline else self.data_file),
            self.data_config.chunk_cache_size,
            chunk_cache,
        )
        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
            self.opener,
            self.sidedata_file,
            self.data_config.chunk_cache_size,
        )

        # revlog header -> revlog compressor
        self._decompressors = {}
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None

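        # When not None, index writes are being delayed: new index entries
        # accumulate in this in-memory buffer instead of reaching the index
        # file (see delay(), write_pending() and finalize_pending() below).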
        self._delay_buffer = None

    @property
    def index_file(self):
        return self.__index_file

    @index_file.setter
    def index_file(self, new_index_file):
        self.__index_file = new_index_file
        if self.inline:
            self._segmentfile.filename = new_index_file

    def __len__(self):
        return len(self.index)

    def clear_cache(self):
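        # caches are not expected to be cleared while the visibility of
        # written data is being delayed, hence the assertion below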
        assert not self.is_delaying
        self._revisioncache = None
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()

    @property
    def canonical_index_file(self):
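        """the path of the "official" index file, even while index writes
        are being delayed or diverted to a side-file"""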
        if self._orig_index_file is not None:
            return self._orig_index_file
        return self.index_file

    @property
    def is_delaying(self):
        """is the revlog currently delaying the visibility of written data?

        The delaying mechanism can be either in-memory or written on disk in a
        side-file."""
        return (self._delay_buffer is not None) or (
            self._orig_index_file is not None
        )

    # Derived from index values.

    def start(self, rev):
        """the offset of the data chunk for this revision"""
        return int(self.index[rev][0] >> 16)

    def length(self, rev):
        """the length of the data chunk for this revision"""
        return self.index[rev][1]

    def end(self, rev):
        """the end of the data chunk for this revision"""
        return self.start(rev) + self.length(rev)

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self.delta_config.general_delta:
            return base
        else:
            return rev - 1

    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self.delta_config.sparse_revlog:
            return self.deltaparent(rev) == nullrev
        elif hasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        while self.length(p1) == 0:
            b = self.deltaparent(p1)
            if b == p1:
                break
            p1 = b
        p2 = entry[6]
        while self.length(p2) == 0:
            b = self.deltaparent(p2)
            if b == p2:
                break
            p2 = b
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        generaldelta = self.delta_config.general_delta
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self.feature_config.compression_engine]
        return engine.revlogcompressor(
            self.feature_config.compression_engine_options
        )

    @util.propertycache
    def _decompressor(self):
        """the default decompressor"""
        if self._default_compression_header is None:
            return None
        t = self._default_compression_header
        c = self._get_decompressor(t)
        return c.decompress

    def _get_decompressor(self, t):
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(
                    self.feature_config.compression_engine_options
                )
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(
                    _(b'unknown compression type %s') % binascii.hexlify(t)
                )
        return compressor

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
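        # The return value is a (header, data) 2-tuple: an empty header
        # means any compression header is embedded in the returned data
        # itself, while a b'u' header flags data stored uncompressed.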
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        compressor = self._get_decompressor(t)

        return compressor.decompress(data)

    @contextlib.contextmanager
    def reading(self):
        """Context manager that keeps data and sidedata files open for reading"""
        if len(self.index) == 0:
            yield  # nothing to be read
        else:
            with self._segmentfile.reading():
                with self._segmentfile_sidedata.reading():
                    yield

    @property
    def is_writing(self):
        """True if a writing context is open"""
        return self._writinghandles is not None

    @property
    def is_open(self):
        """True if any file handle is being held

        Used for assert and debug in the python code"""
        return self._segmentfile.is_open or self._segmentfile_sidedata.is_open

    @contextlib.contextmanager
    def writing(self, transaction, data_end=None, sidedata_end=None):
        """Open the revlog files for writing

        Adding content to a revlog should be done within such a context.
        """
        if self.is_writing:
            yield
        else:
            ifh = dfh = sdfh = None
            try:
                r = len(self.index)
                # opening the data file.
                dsize = 0
                if r:
                    dsize = self.end(r - 1)
                dfh = None
                if not self.inline:
                    try:
                        dfh = self.opener(self.data_file, mode=b"r+")
                        if data_end is None:
                            dfh.seek(0, os.SEEK_END)
                        else:
                            dfh.seek(data_end, os.SEEK_SET)
                    except FileNotFoundError:
                        dfh = self.opener(self.data_file, mode=b"w+")
                    transaction.add(self.data_file, dsize)
                if self.sidedata_file is not None:
                    assert sidedata_end is not None
                    # revlog-v2 does not inline, help Pytype
                    assert dfh is not None
                    try:
                        sdfh = self.opener(self.sidedata_file, mode=b"r+")
                        sdfh.seek(sidedata_end, os.SEEK_SET)
                    except FileNotFoundError:
                        sdfh = self.opener(self.sidedata_file, mode=b"w+")
                    transaction.add(self.sidedata_file, sidedata_end)

                # opening the index file.
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self.inline:
                    transaction.add(self.index_file, dsize + isize)
                else:
                    transaction.add(self.index_file, isize)
                # exposing all file handles for writing.
                self._writinghandles = (ifh, dfh, sdfh)
                self._segmentfile.writing_handle = ifh if self.inline else dfh
                self._segmentfile_sidedata.writing_handle = sdfh
                yield
            finally:
                self._writinghandles = None
                self._segmentfile.writing_handle = None
                self._segmentfile_sidedata.writing_handle = None
                if dfh is not None:
                    dfh.close()
                if sdfh is not None:
                    sdfh.close()
                # closing the index file last to avoid exposing referent to
                # potential unflushed data content.
                if ifh is not None:
                    ifh.close()

    def __index_write_fp(self, index_end=None):
        """internal method to open the index file for writing

        You should not use this directly; use `_writing` instead
        """
        try:
            if self._delay_buffer is None:
                f = self.opener(
                    self.index_file,
                    mode=b"r+",
                    checkambig=self.data_config.check_ambig,
                )
            else:
                # check_ambig affects the way we open the file for writing;
                # however, here we do not actually open a file for writing,
                # as writes will be appended to a delay_buffer. So
                # check_ambig is not meaningful and is unneeded here.
                f = randomaccessfile.appender(
                    self.opener, self.index_file, b"r+", self._delay_buffer
                )
            if index_end is None:
                f.seek(0, os.SEEK_END)
            else:
                f.seek(index_end, os.SEEK_SET)
            return f
        except FileNotFoundError:
            if self._delay_buffer is None:
                return self.opener(
                    self.index_file,
                    mode=b"w+",
                    checkambig=self.data_config.check_ambig,
                )
            else:
                return randomaccessfile.appender(
                    self.opener, self.index_file, b"w+", self._delay_buffer
                )

    def __index_new_fp(self):
        """internal method to create a new index file for writing

        You should not use this unless you are upgrading from inline revlog
        """
        return self.opener(
            self.index_file,
            mode=b"w",
            checkambig=self.data_config.check_ambig,
            atomictemp=True,
        )

    def split_inline(self, tr, header, new_index_file_path=None):
        """split the data of an inline revlog into an index and a data file"""
        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None
            self._segmentfile.writing_handle = None
            # No need to deal with sidedata writing handle as it is only
            # relevant with revlog-v2 which is never inline, not reaching
            # this code

        new_dfh = self.opener(self.data_file, mode=b"w+")
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self.reading():
                for r in range(len(self.index)):
                    new_dfh.write(self.get_segment_for_revs(r, r)[1])
                new_dfh.flush()

            if new_index_file_path is not None:
                self.index_file = new_index_file_path
            with self.__index_new_fp() as fp:
                self.inline = False
                for i in range(len(self.index)):
                    e = self.index.entry_binary(i)
                    if i == 0:
                        packed_header = self.index.pack_header(header)
                        e = packed_header + e
                    fp.write(e)

                # If we don't use side-write, the temp file replaces the
                # real index when we exit the context manager

            self._segmentfile = randomaccessfile.randomaccessfile(
                self.opener,
                self.data_file,
                self.data_config.chunk_cache_size,
            )

            if existing_handles:
                # switched from inline to conventional; reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh, None)
                self._segmentfile.writing_handle = new_dfh
                new_dfh = None
                # No need to deal with sidedata writing handle as it is only
                # relevant with revlog-v2 which is never inline, not reaching
                # this code
        finally:
            if new_dfh is not None:
                new_dfh.close()
        return self.index_file

    def get_segment_for_revs(self, startrev, endrev):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.

        API: we should consider making this a private part of the InnerRevlog
        at some point.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self.inline:
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._segmentfile.read_chunk(start, length)

    def _chunk(self, rev):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        compression_mode = self.index[rev][10]
        data = self.get_segment_for_revs(rev, rev)[1]
        if compression_mode == COMP_MODE_PLAIN:
            return data
        elif compression_mode == COMP_MODE_DEFAULT:
            return self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            return self.decompress(data)
        else:
            msg = b'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)

    def _chunks(self, revs, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self.inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self.data_config.with_sparse_read:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self,
                revs,
                targetsize=targetsize,
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self.get_segment_for_revs(firstrev, lastrev)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev) for rev in revschunk]

            decomp = self.decompress
            # self._decompressor might be None, but will not be used in that case
            def_decomp = self._decompressor
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                comp_mode = self.index[rev][10]
                c = buffer(data, chunkstart - offset, chunklength)
                if comp_mode == COMP_MODE_PLAIN:
                    ladd(c)
                elif comp_mode == COMP_MODE_INLINE:
                    ladd(decomp(c))
                elif comp_mode == COMP_MODE_DEFAULT:
                    ladd(def_decomp(c))
                else:
                    msg = b'unknown compression mode %d'
                    msg %= comp_mode
                    raise error.RevlogError(msg)

        return l

    def raw_text(self, node, rev):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            cachedrev = self._revisioncache[1]

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._inner._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)

    def sidedata(self, rev, sidedata_end):
        """Return the sidedata for a given revision number."""
        index_entry = self.index[rev]
        sidedata_offset = index_entry[8]
        sidedata_size = index_entry[9]

        if self.inline:
            sidedata_offset += self.index.entry_size * (1 + rev)
        if sidedata_size == 0:
            return {}

        if sidedata_end < sidedata_offset + sidedata_size:
            filename = self.sidedata_file
            end = sidedata_end
            offset = sidedata_offset
            length = sidedata_size
            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
            raise error.RevlogError(m)

        comp_segment = self._segmentfile_sidedata.read_chunk(
            sidedata_offset, sidedata_size
        )

        comp = self.index[rev][11]
        if comp == COMP_MODE_PLAIN:
            segment = comp_segment
        elif comp == COMP_MODE_DEFAULT:
            segment = self._decompressor(comp_segment)
        elif comp == COMP_MODE_INLINE:
            segment = self.decompress(comp_segment)
        else:
            msg = b'unknown compression mode %d'
            msg %= comp
            raise error.RevlogError(msg)

        sidedata = sidedatautil.deserialize_sidedata(segment)
        return sidedata

    def write_entry(
        self,
        transaction,
        entry,
        data,
        link,
        offset,
        sidedata,
        sidedata_offset,
        index_end,
        data_end,
        sidedata_end,
    ):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        if index_end is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(index_end, os.SEEK_SET)
        if dfh:
            if data_end is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(sidedata_end, os.SEEK_SET)

        curr = len(self.index) - 1
        if not self.inline:
            transaction.add(self.data_file, offset)
            if self.sidedata_file:
                transaction.add(self.sidedata_file, sidedata_offset)
            transaction.add(self.canonical_index_file, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
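            # When index writes are delayed, keep the new entry in memory
            # instead of writing it to the index file; write_pending() or
            # finalize_pending() will flush it later.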
            if self._delay_buffer is None:
                ifh.write(entry)
            else:
                self._delay_buffer.append(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self.canonical_index_file, offset)
            assert not sidedata
            if self._delay_buffer is None:
                ifh.write(entry)
                ifh.write(data[0])
                ifh.write(data[1])
            else:
                self._delay_buffer.append(entry)
                self._delay_buffer.append(data[0])
                self._delay_buffer.append(data[1])
        return (
            ifh.tell(),
            dfh.tell() if dfh else None,
            sdfh.tell() if sdfh else None,
        )

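    # Delay / divert machinery.
    #
    # The methods below are typically driven by the changelog during a
    # transaction. A sketch of the calling sequence (this is illustrative,
    # not the exact caller code; `inner` names this object):
    #
    #   inner.delay()             # transaction start: hide index updates
    #   ...                       # new entries accumulate in memory, or in
    #                             # a diverted ``.a`` index file
    #   inner.write_pending()     # expose pending data (e.g. to hooks)
    #                             # through a ``.a`` index file
    #   inner.finalize_pending()  # transaction close: make all writes
    #                             # visible in the official index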
    def _divert_index(self):
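        """return the path of the index file used while writes are diverted

        (``.a`` is the suffix Mercurial uses for "pending" files)"""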
        return self.index_file + b'.a'

    def delay(self):
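        """delay the visibility of newly written index data

        If the revlog is empty, writes are diverted to a separate ``.a``
        index file and that path is returned. Otherwise, index entries are
        buffered in memory in ``self._delay_buffer`` and None is returned.
        Calling this while a delay or divert is already in place is a
        no-op."""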
        assert not self.is_open
        if self._delay_buffer is not None or self._orig_index_file is not None:
            # delay or divert already in place
            return None
        elif len(self.index) == 0:
            self._orig_index_file = self.index_file
            self.index_file = self._divert_index()
            self._segmentfile.filename = self.index_file
            assert self._orig_index_file is not None
            assert self.index_file is not None
            if self.opener.exists(self.index_file):
                self.opener.unlink(self.index_file)
            return self.index_file
        else:
            self._segmentfile._delay_buffer = self._delay_buffer = []
            return None

    def write_pending(self):
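        """expose the currently delayed index entries in a ``.a`` side-file

        The official index file is copied to the pending file, any in-memory
        buffer is appended to it, and the revlog switches to that pending
        file. Returns a (pending_index_file, any_pending) pair;
        pending_index_file is None when writes were already diverted."""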
        assert not self.is_open
        if self._orig_index_file is not None:
            return None, True
        any_pending = False
        pending_index_file = self._divert_index()
        if self.opener.exists(pending_index_file):
            self.opener.unlink(pending_index_file)
        util.copyfile(
            self.opener.join(self.index_file),
            self.opener.join(pending_index_file),
        )
        if self._delay_buffer:
            with self.opener(pending_index_file, b'r+') as ifh:
                ifh.seek(0, os.SEEK_END)
                ifh.write(b"".join(self._delay_buffer))
            any_pending = True
        self._segmentfile._delay_buffer = self._delay_buffer = None
        self._orig_index_file = self.index_file
        self.index_file = pending_index_file
        self._segmentfile.filename = self.index_file
        return self.index_file, any_pending

    def finalize_pending(self):
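        """make all delayed index entries visible in the official index

        In-memory entries are appended to the index file, or a diverted
        ``.a`` index file is renamed over the official one. Returns the
        path of the canonical index file."""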
        assert not self.is_open

        delay = self._delay_buffer is not None
        divert = self._orig_index_file is not None

        if delay and divert:
            assert False, "unreachable"
        elif delay:
            if self._delay_buffer:
                with self.opener(self.index_file, b'r+') as ifh:
                    ifh.seek(0, os.SEEK_END)
                    ifh.write(b"".join(self._delay_buffer))
            self._segmentfile._delay_buffer = self._delay_buffer = None
        elif divert:
            if self.opener.exists(self.index_file):
                self.opener.rename(
                    self.index_file,
                    self._orig_index_file,
                    checkambig=True,
                )
            self.index_file = self._orig_index_file
            self._orig_index_file = None
            self._segmentfile.filename = self.index_file
        else:
            msg = b"neither delay nor divert found on this revlog"
            raise error.ProgrammingError(msg)
        return self.canonical_index_file


class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.

    `concurrencychecker` is an optional function that receives 3 arguments: a
    file handle, a filename, and an expected position. It should check whether
    the current position in the file handle is valid, and log/warn/fail (by
    raising).

    See mercurial/revlogutils/constants.py for details about the content of an
    index entry.
    """

    _flagserrorclass = error.RevlogError

    @staticmethod
    def is_inline_index(header_bytes):
        """Determine if a revlog is inline from the initial bytes of the index"""
        header = INDEX_HEADER.unpack(header_bytes)[0]

        _format_flags = header & ~0xFFFF
        _format_version = header & 0xFFFF

        features = FEATURES_BY_VERSION[_format_version]
        return features[b'inline'](_format_flags)

    def __init__(
        self,
        opener,
        target,
        radix,
        postfix=None,  # only exists for `tmpcensored` now
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
        concurrencychecker=None,
        trypending=False,
        try_split=False,
        canonical_parent_order=True,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        `target`: a (KIND, ID) tuple that identifies the content stored in
        this revlog. It helps the rest of the code to understand what the
        revlog is about without having to resort to heuristics and index
        filename analysis. Note that this must be reliably set by normal
        code, but that test, debug, or performance-measurement code might
        not set it to an accurate value.
        """

1151 self.radix = radix
1264 self.radix = radix
1152
1265
1153 self._docket_file = None
1266 self._docket_file = None
1154 self._indexfile = None
1267 self._indexfile = None
1155 self._datafile = None
1268 self._datafile = None
1156 self._sidedatafile = None
1269 self._sidedatafile = None
1157 self._nodemap_file = None
1270 self._nodemap_file = None
1158 self.postfix = postfix
1271 self.postfix = postfix
1159 self._trypending = trypending
1272 self._trypending = trypending
1160 self._try_split = try_split
1273 self._try_split = try_split
1161 self.opener = opener
1274 self.opener = opener
1162 if persistentnodemap:
1275 if persistentnodemap:
1163 self._nodemap_file = nodemaputil.get_nodemap_file(self)
1276 self._nodemap_file = nodemaputil.get_nodemap_file(self)
1164
1277
1165 assert target[0] in ALL_KINDS
1278 assert target[0] in ALL_KINDS
1166 assert len(target) == 2
1279 assert len(target) == 2
1167 self.target = target
1280 self.target = target
1168 if b'feature-config' in self.opener.options:
1281 if b'feature-config' in self.opener.options:
1169 self.feature_config = self.opener.options[b'feature-config'].copy()
1282 self.feature_config = self.opener.options[b'feature-config'].copy()
1170 else:
1283 else:
1171 self.feature_config = FeatureConfig()
1284 self.feature_config = FeatureConfig()
1172 self.feature_config.censorable = censorable
1285 self.feature_config.censorable = censorable
1173 self.feature_config.canonical_parent_order = canonical_parent_order
1286 self.feature_config.canonical_parent_order = canonical_parent_order
1174 if b'data-config' in self.opener.options:
1287 if b'data-config' in self.opener.options:
1175 self.data_config = self.opener.options[b'data-config'].copy()
1288 self.data_config = self.opener.options[b'data-config'].copy()
1176 else:
1289 else:
1177 self.data_config = DataConfig()
1290 self.data_config = DataConfig()
1178 self.data_config.check_ambig = checkambig
1291 self.data_config.check_ambig = checkambig
1179 self.data_config.mmap_large_index = mmaplargeindex
1292 self.data_config.mmap_large_index = mmaplargeindex
1180 if b'delta-config' in self.opener.options:
1293 if b'delta-config' in self.opener.options:
1181 self.delta_config = self.opener.options[b'delta-config'].copy()
1294 self.delta_config = self.opener.options[b'delta-config'].copy()
1182 else:
1295 else:
1183 self.delta_config = DeltaConfig()
1296 self.delta_config = DeltaConfig()
1184 self.delta_config.upper_bound_comp = upperboundcomp
1297 self.delta_config.upper_bound_comp = upperboundcomp
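        # At this point behavior is driven by three config objects:
        # feature_config (what the revlog stores), data_config (how data is
        # read) and delta_config (how deltas are computed and stored).
        # Callers may pre-seed them through opener.options; the constructor
        # arguments above only override individual fields.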

        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)

        self.index = None
        self._docket = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}

        # other optional features

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)
        # prevent nesting of addgroup
        self._adding_group = None

        chunk_cache = self._loadindex()
        self._load_inner(chunk_cache)
        self._concurrencychecker = concurrencychecker

    @property
    def _generaldelta(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.general_delta", b"6.6", stacklevel=2
        )
        return self.delta_config.general_delta

    @property
    def _checkambig(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.data_config.checkambig", b"6.6", stacklevel=2
        )
        return self.data_config.check_ambig

    @property
    def _mmaplargeindex(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.data_config.mmap_large_index", b"6.6", stacklevel=2
        )
        return self.data_config.mmap_large_index

    @property
    def _censorable(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.feature_config.censorable", b"6.6", stacklevel=2
        )
        return self.feature_config.censorable

    @property
    def _chunkcachesize(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.data_config.chunk_cache_size", b"6.6", stacklevel=2
        )
        return self.data_config.chunk_cache_size

    @property
    def _maxchainlen(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.max_chain_len", b"6.6", stacklevel=2
        )
        return self.delta_config.max_chain_len

    @property
    def _deltabothparents(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.delta_both_parents", b"6.6", stacklevel=2
        )
        return self.delta_config.delta_both_parents

    @property
    def _candidate_group_chunk_size(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.candidate_group_chunk_size",
            b"6.6",
            stacklevel=2,
        )
        return self.delta_config.candidate_group_chunk_size

    @property
    def _debug_delta(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.debug_delta", b"6.6", stacklevel=2
        )
        return self.delta_config.debug_delta

    @property
    def _compengine(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.feature_config.compression_engine",
            b"6.6",
            stacklevel=2,
        )
        return self.feature_config.compression_engine

    @property
    def upperboundcomp(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.upper_bound_comp",
            b"6.6",
            stacklevel=2,
        )
        return self.delta_config.upper_bound_comp

    @property
    def _compengineopts(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.feature_config.compression_engine_options",
            b"6.6",
            stacklevel=2,
        )
        return self.feature_config.compression_engine_options

    @property
    def _maxdeltachainspan(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.max_deltachain_span", b"6.6", stacklevel=2
        )
        return self.delta_config.max_deltachain_span

    @property
    def _withsparseread(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.data_config.with_sparse_read", b"6.6", stacklevel=2
        )
        return self.data_config.with_sparse_read

    @property
    def _sparserevlog(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.sparse_revlog", b"6.6", stacklevel=2
        )
        return self.delta_config.sparse_revlog

    @property
    def hassidedata(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.feature_config.has_side_data", b"6.6", stacklevel=2
        )
        return self.feature_config.has_side_data

    @property
    def _srdensitythreshold(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.data_config.sr_density_threshold",
            b"6.6",
            stacklevel=2,
        )
        return self.data_config.sr_density_threshold

    @property
    def _srmingapsize(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.data_config.sr_min_gap_size", b"6.6", stacklevel=2
        )
        return self.data_config.sr_min_gap_size

    @property
    def _compute_rank(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.feature_config.compute_rank", b"6.6", stacklevel=2
        )
        return self.feature_config.compute_rank

    @property
    def canonical_parent_order(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.feature_config.canonical_parent_order",
            b"6.6",
            stacklevel=2,
        )
        return self.feature_config.canonical_parent_order

    @property
    def _lazydelta(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.lazy_delta", b"6.6", stacklevel=2
        )
        return self.delta_config.lazy_delta

    @property
    def _lazydeltabase(self):
        """temporary compatibility proxy"""
        util.nouideprecwarn(
            b"use revlog.delta_config.lazy_delta_base", b"6.6", stacklevel=2
        )
        return self.delta_config.lazy_delta_base

    def _init_opts(self):
        """process options (from above/config) to set up the default revlog mode

        These values might be affected when actually reading on-disk
        information.

        The relevant values are returned for use in _loadindex().

        * newversionflags:
            version header to use if we need to create a new revlog

        * mmapindexthreshold:
            minimal index size at which to start using mmap

        * force_nodemap:
            force the usage of a "development" version of the nodemap code
        """
        opts = self.opener.options

        if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
            new_header = CHANGELOGV2
            compute_rank = opts.get(b'changelogv2.compute-rank', True)
            self.feature_config.compute_rank = compute_rank
        elif b'revlogv2' in opts:
            new_header = REVLOGV2
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        mmapindexthreshold = None
        if self.data_config.mmap_large_index:
            mmapindexthreshold = self.data_config.mmap_index_threshold
        if self.feature_config.enable_ellipsis:
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).items():
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        chunk_cache_size = self.data_config.chunk_cache_size
        if chunk_cache_size <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % chunk_cache_size
            )
        elif chunk_cache_size & (chunk_cache_size - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % chunk_cache_size
            )
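        # note: `x & (x - 1) == 0` is the standard bit trick for "x is a
        # power of two" (given x > 0), e.g. 8 & 7 == 0 while 6 & 5 == 4.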
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap

    def _get_data(self, filepath, mmap_threshold, size=None):
        """return the file content, with or without mmap

        If the file is missing, return an empty string."""
        try:
            with self.opener(filepath) as fp:
                if mmap_threshold is not None:
                    file_size = self.opener.fstat(fp).st_size
                    if file_size >= mmap_threshold:
                        if size is not None:
                            # avoid potential mmap crash
                            size = min(file_size, size)
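                            # (mapping more bytes than the file actually
                            # holds can raise or crash on some platforms,
                            # hence the clamp to the fstat'ed size)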
                        # TODO: should .close() to release resources without
                        # relying on Python GC
                        if size is None:
                            return util.buffer(util.mmapread(fp))
                        else:
                            return util.buffer(util.mmapread(fp, size))
                if size is None:
                    return fp.read()
                else:
                    return fp.read(size)
        except FileNotFoundError:
            return b''

    def get_streams(self, max_linkrev, force_inline=False):
        """return a list of streams that represent this revlog

        This is used by stream-clone to do byte-to-byte copies of a repository.

        This streams data for all revisions that refer to a changelog revision
        up to `max_linkrev`.

        If `force_inline` is set, it enforces that the stream will represent
        an inline revlog.

        It returns a list of three-tuples:

            [
                (filename, bytes_stream, stream_size),
                …
            ]
        """
        n = len(self)
        index = self.index
        while n > 0:
            linkrev = index[n - 1][4]
            if linkrev < max_linkrev:
                break
            # note: this loop will rarely go through multiple iterations, since
            # it only traverses commits created during the current streaming
            # pull operation.
            #
            # If this becomes a problem, using a binary search should cap the
            # runtime of this.
            n = n - 1
        if n == 0:
            # no data to send
            return []
        index_size = n * index.entry_size
        data_size = self.end(n - 1)
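        # `n` is now the number of revisions to stream; index entries are
        # fixed-size records, so the index size is a plain multiplication,
        # while the data size is the end offset of the last streamed rev.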

        # XXX we might have been split (or stripped) since the object was
        # initialized. We need to close this race too, probably by having a
        # way to pre-open the files we feed to the revlog and never closing
        # them before we are done streaming.

        if self._inline:

            def get_stream():
                with self.opener(self._indexfile, mode=b"r") as fp:
                    yield None
                    size = index_size + data_size
                    if size <= 65536:
                        yield fp.read(size)
                    else:
                        yield from util.filechunkiter(fp, limit=size)

            inline_stream = get_stream()
            next(inline_stream)
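            # priming the generator with next() opens the file (and keeps it
            # open) immediately; the actual bytes are then produced lazily
            # as the caller iterates.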
            return [
                (self._indexfile, inline_stream, index_size + data_size),
            ]
        elif force_inline:

            def get_stream():
                with self.reading():
                    yield None

                    for rev in range(n):
                        idx = self.index.entry_binary(rev)
                        if rev == 0 and self._docket is None:
                            # re-inject the inline flag
                            header = self._format_flags
                            header |= self._format_version
                            header |= FLAG_INLINE_DATA
                            header = self.index.pack_header(header)
                            idx = header + idx
                        yield idx
                        yield self._inner.get_segment_for_revs(rev, rev)[1]

            inline_stream = get_stream()
            next(inline_stream)
            return [
                (self._indexfile, inline_stream, index_size + data_size),
            ]
        else:

            def get_index_stream():
                with self.opener(self._indexfile, mode=b"r") as fp:
                    yield None
                    if index_size <= 65536:
                        yield fp.read(index_size)
                    else:
                        yield from util.filechunkiter(fp, limit=index_size)

            def get_data_stream():
                with self._datafp() as fp:
                    yield None
                    if data_size <= 65536:
                        yield fp.read(data_size)
                    else:
                        yield from util.filechunkiter(fp, limit=data_size)

            index_stream = get_index_stream()
            next(index_stream)
            data_stream = get_data_stream()
            next(data_stream)
            return [
                (self._datafile, data_stream, data_size),
                (self._indexfile, index_stream, index_size),
            ]

    def _loadindex(self, docket=None):

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        elif self._try_split and self.opener.exists(self._split_index_file):
            entry_point = self._split_index_file
        else:
            entry_point = b'%s.i' % self.radix
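        # precedence: an explicit postfix wins, then a pending-transaction
        # file (`.i.a`), then an in-progress split, then the plain `.i`.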

        if docket is not None:
            self._docket = docket
            self._docket_file = entry_point
        else:
            self._initempty = True
            entry_data = self._get_data(entry_point, mmapindexthreshold)
            if len(entry_data) > 0:
                header = INDEX_HEADER.unpack(entry_data[:4])[0]
                self._initempty = False
            else:
                header = new_header

        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF

        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
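            # flags live in the upper 16 bits of the header; shift them down
            # so the message shows the value users actually configure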
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self.delta_config.general_delta = features[b'generaldelta'](
            self._format_flags
        )
        self.feature_config.has_side_data = features[b'sidedata']

        if not features[b'docket']:
            self._indexfile = entry_point
            index_data = entry_data
        else:
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(
                    self, entry_data, use_pending=self._trypending
                )

        if self._docket is not None:
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self.delta_config.general_delta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self._docket is not None:
            self._datafile = self._docket.data_filepath()
            self._sidedatafile = self._docket.sidedata_filepath()
        elif self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self.delta_config.general_delta:
            self.delta_config.sparse_revlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and hasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index = index
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)

        return chunkcache

    def _load_inner(self, chunk_cache):
        if self._docket is None:
            default_compression_header = None
        else:
            default_compression_header = self._docket.default_compression_header

        self._inner = _InnerRevlog(
            opener=self.opener,
            index=self.index,
            index_file=self._indexfile,
            data_file=self._datafile,
            sidedata_file=self._sidedatafile,
            inline=self._inline,
            data_config=self.data_config,
            delta_config=self.delta_config,
            feature_config=self.feature_config,
            chunk_cache=chunk_cache,
            default_compression_header=default_compression_header,
        )

    def get_revlog(self):
        """simple function to mirror the API of other not-really-revlog objects"""
        return self

    @util.propertycache
    def revlog_kind(self):
        return self.target[0]

    @util.propertycache
    def display_id(self):
        """The public-facing "ID" of the revlog that we use in messages"""
        if self.revlog_kind == KIND_FILELOG:
            # Reference the file without the "data/" prefix, so it is familiar
            # to the user.
            return self.target[1]
        else:
            return self.radix

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self._datafile, mode=mode)

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def _candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents), making the delta incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def update_caches(self, transaction):
        """update the on-disk cache

        If a transaction is passed, the update may be delayed until the
        transaction commits."""
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        """Clear in-memory caches"""
        self._chainbasecache.clear()
        self._inner.clear_cache()
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The Python code is responsible for validating the docket, so we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and hasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        """return the revision number associated with a <nodeid>"""
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
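    # E.g. (illustrative value only) entry[0] == 0x0000000000AB0001 means
    # start() == 0xAB and flags() == 0x0001.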
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def sidedata_cut_off(self, rev):
        sd_cut_off = self.index[rev][8]
        if sd_cut_off != 0:
            return sd_cut_off
        # This is some annoying dance, because entries without sidedata
        # currently use 0 as their offset (instead of previous-offset +
        # previous-size).
        #
        # We should reconsider this sidedata → 0 sidedata_offset policy.
        # In the meantime, we need this.
        while 0 <= rev:
            e = self.index[rev]
            if e[9] != 0:
                return e[8] + e[9]
            rev -= 1
        return 0

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.feature_config.has_side_data:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev))

    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
        """
        rank = self.index[rev][ENTRY_RANK]
        if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
            return None
        if rev == nullrev:
            return 0  # convention
        return rank

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        if self.feature_config.canonical_parent_order and entry[5] == nullrev:
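            # in canonical order, a null first parent is swapped with the
            # second parent so that a non-null parent (if any) comes first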
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
        if self.feature_config.canonical_parent_order and d[5] == self.nullid:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self.delta_config.general_delta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
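                # without general delta, the delta base is implicitly the
                # previous revision, so walking the chain is just rev - 1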
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        return self._inner._deltachain(rev, stoprev=stoprev)

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

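        # prefer the Rust implementation when the index is compatible with
        # it; both variants yield the same revisions, only the traversal
        # engine differs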
        if rustancestor is not None and self.index.rust_ext_compat:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset:
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)
2091
2204
2092 has = lazyset(self.ancestors(common))
2205 has = lazyset(self.ancestors(common))
2093 has.add(nullrev)
2206 has.add(nullrev)
2094 has.update(common)
2207 has.update(common)
2095
2208
2096 # take all ancestors from heads that aren't in has
2209 # take all ancestors from heads that aren't in has
2097 missing = set()
2210 missing = set()
2098 visit = collections.deque(r for r in heads if r not in has)
2211 visit = collections.deque(r for r in heads if r not in has)
2099 while visit:
2212 while visit:
2100 r = visit.popleft()
2213 r = visit.popleft()
2101 if r in missing:
2214 if r in missing:
2102 continue
2215 continue
2103 else:
2216 else:
2104 missing.add(r)
2217 missing.add(r)
2105 for p in self.parentrevs(r):
2218 for p in self.parentrevs(r):
2106 if p not in has:
2219 if p not in has:
2107 visit.append(p)
2220 visit.append(p)
2108 missing = list(missing)
2221 missing = list(missing)
2109 missing.sort()
2222 missing.sort()
2110 return has, [self.node(miss) for miss in missing]
2223 return has, [self.node(miss) for miss in missing]
2111
2224
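    # Shape of the result, as a sketch (`c` and `h` are hypothetical nodes):
    #
    #   has, missing = rl.findcommonmissing([c], [h])
    #   # `has` lazily answers membership tests for ::c (plus nullrev);
    #   # `missing` lists the nodes of (::h) - (::c), oldest revision first.
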
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None and self.index.rust_ext_compat:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

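    # Sketch of the typical exchange-style use (`remoteknown` is a
    # hypothetical list of nodes the other side is known to have): the nodes
    # to send are the ancestors of our heads that the remote lacks.
    #
    #   tosend = rl.findmissing(common=remoteknown, heads=rl.heads())
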
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.items() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

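    # Shape of the result, as a sketch (`r1` and `h1` are hypothetical nodes):
    #
    #   nodes, outroots, outheads = rl.nodesbetween([r1], [h1])
    #   # nodes    == topologically sorted r1::h1, including r1 and h1
    #   # outroots == the members of [r1] from which some head is reachable
    #   # outheads == the members of [h1] reachable from the roots
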
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None and self.index.rust_ext_compat:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

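    # The pure-Python fallback above is a single linear pass: every rev
    # starts as a candidate head and is cleared as soon as it is seen as a
    # parent. The same idea over a plain parent table, as a standalone
    # sketch (a list of (p1, p2) tuples, -1 meaning "no parent"):
    #
    #   def headrevs_sketch(parentrevs):
    #       ishead = [True] * len(parentrevs)
    #       for p1, p2 in parentrevs:
    #           if p1 >= 0:
    #               ishead[p1] = False
    #           if p2 >= 0:
    #               ishead[p2] = False
    #       return [r for r, h in enumerate(ishead) if h]
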
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

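    # Note (sketch of the cost model, not part of the module): revlogs only
    # store parent pointers, so children are found by scanning every later
    # revision. For a hypothetical node `n`:
    #
    #   rl.children(n)  # roughly O(len(rl) - rl.rev(n)) parentrevs lookups
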
    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

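    # The `a > b` early return is safe because revision numbers form a
    # topological order: an ancestor always has a smaller rev. Sketch:
    #
    #   assert rl.isancestorrev(0, 0)       # every rev is its own ancestor
    #   assert not rl.isancestorrev(5, 3)   # 5 > 3, so it cannot be one
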
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (binascii.Error, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        ambiguous = False
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    ambiguous = True
                else:
                    return partial
            elif maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            else:
                return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                ambiguous = True
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key is not hex
            pass
        if ambiguous:
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            # hex(node)[:...]
            l = len(id) // 2 * 2  # grab an even number of digits
            try:
                # we're dropping the last digit, so let's check that it's hex,
                # to avoid the expensive computation below if it's not
                if len(id) % 2 > 0:
                    if not (id[-1] in hexdigits):
                        return None
                prefix = bin(id[:l])
            except binascii.Error:
                pass
            else:
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

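    # Usage sketch (`binnode` is a hypothetical binary node id already in the
    # revlog): the result is the shortest hex prefix that `lookup` resolves
    # unambiguously, never shorter than `minlength` and never a prefix of
    # the all-'f' wdir pseudo-id.
    #
    #   prefix = rl.shortest(binnode, minlength=4)
    #   assert rl.lookup(prefix) == binnode
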
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self.delta_config.general_delta:
            return base
        else:
            return rev - 1

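    # Sketch of the two storage layouts this encodes: with general delta the
    # base recorded in the index entry is the delta parent itself, while in
    # the legacy layout a delta always applies to the previous revision.
    #
    #   general delta:  rev 7 recording base 3  ->  deltaparent(7) == 3
    #   legacy layout:  rev 7 with base != 7    ->  deltaparent(7) == 6
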
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        ret = self._inner.issnapshot(rev)
        self.issnapshot = self._inner.issnapshot
        return ret

    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._inner._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._inner._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def revision(self, nodeorrev):
        """return an uncompressed revision of a given node or revision
        number.
        """
        return self._revisiondata(nodeorrev)

    def sidedata(self, nodeorrev):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
        else:
            rev = self.rev(nodeorrev)
        return self._sidedata(rev)

    def _rawtext(self, node, rev):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """
        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._inner._revisioncache:
            if self._inner._revisioncache[0] == node:
                return (rev, self._inner._revisioncache[2], True)

        if rev is None:
            rev = self.rev(node)

        return self._inner.raw_text(node, rev)

    def _revisiondata(self, nodeorrev, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b""

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev)

        if raw and validated:
            # if we don't want to process the raw text and the raw text is
            # already cached, we can exit early.
            return rawtext
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (they usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._inner._revisioncache = (node, rev, rawtext)

        return text

    def _sidedata(self, rev):
        """Return the sidedata for a given revision number."""
        sidedata_end = None
        if self._docket is not None:
            sidedata_end = self._docket.sidedata_end
        return self._inner.sidedata(rev, sidedata_end)

    def rawdata(self, nodeorrev):
        """return the uncompressed raw data of a given node or revision number."""
        return self._revisiondata(nodeorrev, raw=True)

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if (
                    self._inner._revisioncache
                    and self._inner._revisioncache[0] == node
                ):
                    self._inner._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self.feature_config.censorable and storageutil.iscensoredtext(
                text
            ):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise

    @property
    def _split_index_file(self):
        """the path where to expect the index of an ongoing splitting operation

        The file will only exist if a splitting operation is in progress, but
        it is always expected at the same location."""
        parts = self.radix.split(b'/')
        if len(parts) > 1:
            # adds a '-s' prefix to the ``data/`` or ``meta/`` base
            head = parts[0] + b'-s'
            mids = parts[1:-1]
            tail = parts[-1] + b'.i'
            pieces = [head] + mids + [tail]
            return b'/'.join(pieces)
        else:
            # the revlog is stored at the root of the store (changelog or
            # manifest), no risk of collision.
            return self.radix + b'.i.s'

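    # Example of the resulting paths (illustrative radix values only):
    #
    #   radix b'data/foo'    ->  split index at b'data-s/foo.i'
    #   radix b'00changelog' ->  split index at b'00changelog.i.s'
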
    def _enforceinlinesize(self, tr, side_write=True):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            return

        if self._docket is not None:
            msg = b"inline revlog should not have a docket"
            raise error.ProgrammingError(msg)

        troffset = tr.findoffset(self._inner.canonical_index_file)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        if troffset:
            tr.addbackup(self._inner.canonical_index_file, for_offset=True)
        tr.add(self._datafile, 0)

        new_index_file_path = None
        if side_write:
            old_index_file_path = self._indexfile
            new_index_file_path = self._split_index_file
            opener = self.opener
            weak_self = weakref.ref(self)

            # the "split" index replaces the real index when the transaction
            # is finalized
            def finalize_callback(tr):
                opener.rename(
                    new_index_file_path,
                    old_index_file_path,
                    checkambig=True,
                )
                maybe_self = weak_self()
                if maybe_self is not None:
                    maybe_self._indexfile = old_index_file_path
                    maybe_self._inner.index_file = maybe_self._indexfile

            def abort_callback(tr):
                maybe_self = weak_self()
                if maybe_self is not None:
                    maybe_self._indexfile = old_index_file_path
                    maybe_self._inner.inline = True
                    maybe_self._inner.index_file = old_index_file_path

            tr.registertmp(new_index_file_path)
            if self.target[1] is not None:
                callback_id = b'000-revlog-split-%d-%s' % self.target
            else:
                callback_id = b'000-revlog-split-%d' % self.target[0]
            tr.addfinalize(callback_id, finalize_callback)
            tr.addabort(callback_id, abort_callback)

        self._format_flags &= ~FLAG_INLINE_DATA
        self._inner.split_inline(
            tr,
            self._format_flags | self._format_version,
            new_index_file_path=new_index_file_path,
        )

        self._inline = False
        if new_index_file_path is not None:
            self._indexfile = new_index_file_path

        nodemaputil.setup_persistent_nodemap(tr, self)

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    @contextlib.contextmanager
    def reading(self):
        with self._inner.reading():
            yield

    @contextlib.contextmanager
    def _writing(self, transaction):
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._inner.is_writing:
            yield
        else:
            data_end = None
            sidedata_end = None
            if self._docket is not None:
                data_end = self._docket.data_end
                sidedata_end = self._docket.sidedata_end
            with self._inner.writing(
                transaction,
                data_end=data_end,
                sidedata_end=sidedata_end,
            ):
                yield
                if self._docket is not None:
                    self._write_docket(transaction)

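    # Usage sketch: callers wrap mutations in the writing context so the
    # inner revlog has its files open for append and the docket (when one
    # exists) is written once at the end. `tr` below is a hypothetical
    # transaction object:
    #
    #   with rl._writing(tr):
    #       rl.addrevision(text, tr, linkrev, p1, p2)
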
    @property
    def is_delaying(self):
        return self._inner.is_delaying

    def _write_docket(self, transaction):
        """write the current docket on disk

        Exists as a method to help the changelog implement transaction logic.

        We could also imagine using the same transaction logic for all revlogs
        since dockets are cheap."""
        self._docket.write(transaction)

2937 def addrevision(
3054 def addrevision(
2938 self,
3055 self,
2939 text,
3056 text,
2940 transaction,
3057 transaction,
2941 link,
3058 link,
2942 p1,
3059 p1,
2943 p2,
3060 p2,
2944 cachedelta=None,
3061 cachedelta=None,
2945 node=None,
3062 node=None,
2946 flags=REVIDX_DEFAULT_FLAGS,
3063 flags=REVIDX_DEFAULT_FLAGS,
2947 deltacomputer=None,
3064 deltacomputer=None,
2948 sidedata=None,
3065 sidedata=None,
2949 ):
3066 ):
2950 """add a revision to the log
3067 """add a revision to the log
2951
3068
2952 text - the revision data to add
3069 text - the revision data to add
2953 transaction - the transaction object used for rollback
3070 transaction - the transaction object used for rollback
2954 link - the linkrev data to add
3071 link - the linkrev data to add
2955 p1, p2 - the parent nodeids of the revision
3072 p1, p2 - the parent nodeids of the revision
2956 cachedelta - an optional precomputed delta
3073 cachedelta - an optional precomputed delta
2957 node - nodeid of revision; typically node is not specified, and it is
3074 node - nodeid of revision; typically node is not specified, and it is
2958 computed by default as hash(text, p1, p2); however, subclasses might
3075 computed by default as hash(text, p1, p2); however, subclasses might
2959 use a different hashing method (and override checkhash() in that case)
3076 use a different hashing method (and override checkhash() in that case)
2960 flags - the known flags to set on the revision
3077 flags - the known flags to set on the revision
2961 deltacomputer - an optional deltacomputer instance shared between
3078 deltacomputer - an optional deltacomputer instance shared between
2962 multiple calls
3079 multiple calls
2963 """
3080 """
2964 if link == nullrev:
3081 if link == nullrev:
2965 raise error.RevlogError(
3082 raise error.RevlogError(
2966 _(b"attempted to add linkrev -1 to %s") % self.display_id
3083 _(b"attempted to add linkrev -1 to %s") % self.display_id
2967 )
3084 )
2968
3085
2969 if sidedata is None:
3086 if sidedata is None:
2970 sidedata = {}
3087 sidedata = {}
2971 elif sidedata and not self.feature_config.has_side_data:
3088 elif sidedata and not self.feature_config.has_side_data:
2972 raise error.ProgrammingError(
3089 raise error.ProgrammingError(
2973 _(b"trying to add sidedata to a revlog who don't support them")
3090 _(b"trying to add sidedata to a revlog who don't support them")
2974 )
3091 )
2975
3092
2976 if flags:
3093 if flags:
2977 node = node or self.hash(text, p1, p2)
3094 node = node or self.hash(text, p1, p2)
2978
3095
2979 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
3096 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2980
3097
2981 # If the flag processor modifies the revision data, ignore any provided
3098 # If the flag processor modifies the revision data, ignore any provided
2982 # cachedelta.
3099 # cachedelta.
2983 if rawtext != text:
3100 if rawtext != text:
2984 cachedelta = None
3101 cachedelta = None
2985
3102
2986 if len(rawtext) > _maxentrysize:
3103 if len(rawtext) > _maxentrysize:
2987 raise error.RevlogError(
3104 raise error.RevlogError(
2988 _(
3105 _(
2989 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
3106 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2990 )
3107 )
2991 % (self.display_id, len(rawtext))
3108 % (self.display_id, len(rawtext))
2992 )
3109 )
2993
3110
2994 node = node or self.hash(rawtext, p1, p2)
3111 node = node or self.hash(rawtext, p1, p2)
2995 rev = self.index.get_rev(node)
3112 rev = self.index.get_rev(node)
2996 if rev is not None:
3113 if rev is not None:
2997 return rev
3114 return rev
2998
3115
2999 if validatehash:
3116 if validatehash:
3000 self.checkhash(rawtext, node, p1=p1, p2=p2)
3117 self.checkhash(rawtext, node, p1=p1, p2=p2)
3001
3118
3002 return self.addrawrevision(
3119 return self.addrawrevision(
3003 rawtext,
3120 rawtext,
3004 transaction,
3121 transaction,
3005 link,
3122 link,
3006 p1,
3123 p1,
3007 p2,
3124 p2,
3008 node,
3125 node,
3009 flags,
3126 flags,
3010 cachedelta=cachedelta,
3127 cachedelta=cachedelta,
3011 deltacomputer=deltacomputer,
3128 deltacomputer=deltacomputer,
3012 sidedata=sidedata,
3129 sidedata=sidedata,
3013 )
3130 )
3014
3131
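# A minimal usage sketch (hypothetical names; `tr` is an open
# transaction, `rl` a revlog, and the parents are node ids):
#
#     rev = rl.addrevision(b'file content', tr, linkrev, p1node, p2node)
#     node = rl.node(rev)
#
# If the computed node already exists, the existing revision number is
# returned unchanged (see the index.get_rev() early return above).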
3015 def addrawrevision(
3132 def addrawrevision(
3016 self,
3133 self,
3017 rawtext,
3134 rawtext,
3018 transaction,
3135 transaction,
3019 link,
3136 link,
3020 p1,
3137 p1,
3021 p2,
3138 p2,
3022 node,
3139 node,
3023 flags,
3140 flags,
3024 cachedelta=None,
3141 cachedelta=None,
3025 deltacomputer=None,
3142 deltacomputer=None,
3026 sidedata=None,
3143 sidedata=None,
3027 ):
3144 ):
3028 """add a raw revision with known flags, node and parents
3145 """add a raw revision with known flags, node and parents
3029 useful when reusing a revision not stored in this revlog (e.g. received
3146 useful when reusing a revision not stored in this revlog (e.g. received
3030 over the wire, or read from an external bundle).
3147 over the wire, or read from an external bundle).
3031 """
3148 """
3032 with self._writing(transaction):
3149 with self._writing(transaction):
3033 return self._addrevision(
3150 return self._addrevision(
3034 node,
3151 node,
3035 rawtext,
3152 rawtext,
3036 transaction,
3153 transaction,
3037 link,
3154 link,
3038 p1,
3155 p1,
3039 p2,
3156 p2,
3040 flags,
3157 flags,
3041 cachedelta,
3158 cachedelta,
3042 deltacomputer=deltacomputer,
3159 deltacomputer=deltacomputer,
3043 sidedata=sidedata,
3160 sidedata=sidedata,
3044 )
3161 )
3045
3162
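# The raw/non-raw split in short: addrevision() runs the flag processors
# on `text` and computes the node, while addrawrevision() trusts the
# caller's already-processed data. A hedged sketch of the equivalence:
#
#     rawtext, validate = flagutil.processflagswrite(rl, text, flags)
#     rl.addrawrevision(rawtext, tr, link, p1, p2,
#                       rl.hash(rawtext, p1, p2), flags)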
3046 def compress(self, data):
3163 def compress(self, data):
3047 return self._inner.compress(data)
3164 return self._inner.compress(data)
3048
3165
3049 def decompress(self, data):
3166 def decompress(self, data):
3050 return self._inner.decompress(data)
3167 return self._inner.decompress(data)
3051
3168
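# Both helpers proxy the inner revlog's compression engine. The expected
# round-trip invariant (a sketch; `h` is b'u' when the chunk is stored
# uncompressed, and empty when the engine's own header is embedded):
#
#     h, c = rl.compress(data)
#     assert rl.decompress(h + c) == data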
3052 def _addrevision(
3169 def _addrevision(
3053 self,
3170 self,
3054 node,
3171 node,
3055 rawtext,
3172 rawtext,
3056 transaction,
3173 transaction,
3057 link,
3174 link,
3058 p1,
3175 p1,
3059 p2,
3176 p2,
3060 flags,
3177 flags,
3061 cachedelta,
3178 cachedelta,
3062 alwayscache=False,
3179 alwayscache=False,
3063 deltacomputer=None,
3180 deltacomputer=None,
3064 sidedata=None,
3181 sidedata=None,
3065 ):
3182 ):
3066 """internal function to add revisions to the log
3183 """internal function to add revisions to the log
3067
3184
3068 see addrevision for argument descriptions.
3185 see addrevision for argument descriptions.
3069
3186
3070 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
3187 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
3071
3188
3072 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
3189 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
3073 be used.
3190 be used.
3074
3191
3075 invariants:
3192 invariants:
3076 - rawtext is optional (can be None); if not set, cachedelta must be set.
3193 - rawtext is optional (can be None); if not set, cachedelta must be set.
3077 if both are set, they must correspond to each other.
3194 if both are set, they must correspond to each other.
3078 """
3195 """
3079 if node == self.nullid:
3196 if node == self.nullid:
3080 raise error.RevlogError(
3197 raise error.RevlogError(
3081 _(b"%s: attempt to add null revision") % self.display_id
3198 _(b"%s: attempt to add null revision") % self.display_id
3082 )
3199 )
3083 if (
3200 if (
3084 node == self.nodeconstants.wdirid
3201 node == self.nodeconstants.wdirid
3085 or node in self.nodeconstants.wdirfilenodeids
3202 or node in self.nodeconstants.wdirfilenodeids
3086 ):
3203 ):
3087 raise error.RevlogError(
3204 raise error.RevlogError(
3088 _(b"%s: attempt to add wdir revision") % self.display_id
3205 _(b"%s: attempt to add wdir revision") % self.display_id
3089 )
3206 )
3090 if self._inner._writinghandles is None:
3207 if self._inner._writinghandles is None:
3091 msg = b'adding revision outside `revlog._writing` context'
3208 msg = b'adding revision outside `revlog._writing` context'
3092 raise error.ProgrammingError(msg)
3209 raise error.ProgrammingError(msg)
3093
3210
3094 btext = [rawtext]
3211 btext = [rawtext]
3095
3212
3096 curr = len(self)
3213 curr = len(self)
3097 prev = curr - 1
3214 prev = curr - 1
3098
3215
3099 offset = self._get_data_offset(prev)
3216 offset = self._get_data_offset(prev)
3100
3217
3101 if self._concurrencychecker:
3218 if self._concurrencychecker:
3102 ifh, dfh, sdfh = self._inner._writinghandles
3219 ifh, dfh, sdfh = self._inner._writinghandles
3103 # XXX no checking for the sidedata file
3220 # XXX no checking for the sidedata file
3104 if self._inline:
3221 if self._inline:
3105 # offset is "as if" it were in the .d file, so we need to add on
3222 # offset is "as if" it were in the .d file, so we need to add on
3106 # the size of the entry metadata.
3223 # the size of the entry metadata.
3107 self._concurrencychecker(
3224 self._concurrencychecker(
3108 ifh, self._indexfile, offset + curr * self.index.entry_size
3225 ifh, self._indexfile, offset + curr * self.index.entry_size
3109 )
3226 )
3110 else:
3227 else:
3111 # Entries in the .i are a consistent size.
3228 # Entries in the .i are a consistent size.
3112 self._concurrencychecker(
3229 self._concurrencychecker(
3113 ifh, self._indexfile, curr * self.index.entry_size
3230 ifh, self._indexfile, curr * self.index.entry_size
3114 )
3231 )
3115 self._concurrencychecker(dfh, self._datafile, offset)
3232 self._concurrencychecker(dfh, self._datafile, offset)
3116
3233
3117 p1r, p2r = self.rev(p1), self.rev(p2)
3234 p1r, p2r = self.rev(p1), self.rev(p2)
3118
3235
3119 # full versions are inserted when the needed deltas
3236 # full versions are inserted when the needed deltas
3120 # become comparable to the uncompressed text
3237 # become comparable to the uncompressed text
3121 if rawtext is None:
3238 if rawtext is None:
3122 # need rawtext size, before changed by flag processors, which is
3239 # need rawtext size, before changed by flag processors, which is
3123 # the non-raw size. use revlog explicitly to avoid filelog's extra
3240 # the non-raw size. use revlog explicitly to avoid filelog's extra
3124 # logic that might remove metadata size.
3241 # logic that might remove metadata size.
3125 textlen = mdiff.patchedsize(
3242 textlen = mdiff.patchedsize(
3126 revlog.size(self, cachedelta[0]), cachedelta[1]
3243 revlog.size(self, cachedelta[0]), cachedelta[1]
3127 )
3244 )
3128 else:
3245 else:
3129 textlen = len(rawtext)
3246 textlen = len(rawtext)
3130
3247
3131 if deltacomputer is None:
3248 if deltacomputer is None:
3132 write_debug = None
3249 write_debug = None
3133 if self.delta_config.debug_delta:
3250 if self.delta_config.debug_delta:
3134 write_debug = transaction._report
3251 write_debug = transaction._report
3135 deltacomputer = deltautil.deltacomputer(
3252 deltacomputer = deltautil.deltacomputer(
3136 self, write_debug=write_debug
3253 self, write_debug=write_debug
3137 )
3254 )
3138
3255
3139 if cachedelta is not None and len(cachedelta) == 2:
3256 if cachedelta is not None and len(cachedelta) == 2:
3140 # If the cached delta has no information about how it should be
3257 # If the cached delta has no information about how it should be
3141 # reused, add the default reuse instruction according to the
3258 # reused, add the default reuse instruction according to the
3142 # revlog's configuration.
3259 # revlog's configuration.
3143 if (
3260 if (
3144 self.delta_config.general_delta
3261 self.delta_config.general_delta
3145 and self.delta_config.lazy_delta_base
3262 and self.delta_config.lazy_delta_base
3146 ):
3263 ):
3147 delta_base_reuse = DELTA_BASE_REUSE_TRY
3264 delta_base_reuse = DELTA_BASE_REUSE_TRY
3148 else:
3265 else:
3149 delta_base_reuse = DELTA_BASE_REUSE_NO
3266 delta_base_reuse = DELTA_BASE_REUSE_NO
3150 cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)
3267 cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)
3151
3268
3152 revinfo = revlogutils.revisioninfo(
3269 revinfo = revlogutils.revisioninfo(
3153 node,
3270 node,
3154 p1,
3271 p1,
3155 p2,
3272 p2,
3156 btext,
3273 btext,
3157 textlen,
3274 textlen,
3158 cachedelta,
3275 cachedelta,
3159 flags,
3276 flags,
3160 )
3277 )
3161
3278
3162 deltainfo = deltacomputer.finddeltainfo(revinfo)
3279 deltainfo = deltacomputer.finddeltainfo(revinfo)
3163
3280
3164 compression_mode = COMP_MODE_INLINE
3281 compression_mode = COMP_MODE_INLINE
3165 if self._docket is not None:
3282 if self._docket is not None:
3166 default_comp = self._docket.default_compression_header
3283 default_comp = self._docket.default_compression_header
3167 r = deltautil.delta_compression(default_comp, deltainfo)
3284 r = deltautil.delta_compression(default_comp, deltainfo)
3168 compression_mode, deltainfo = r
3285 compression_mode, deltainfo = r
3169
3286
3170 sidedata_compression_mode = COMP_MODE_INLINE
3287 sidedata_compression_mode = COMP_MODE_INLINE
3171 if sidedata and self.feature_config.has_side_data:
3288 if sidedata and self.feature_config.has_side_data:
3172 sidedata_compression_mode = COMP_MODE_PLAIN
3289 sidedata_compression_mode = COMP_MODE_PLAIN
3173 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
3290 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
3174 sidedata_offset = self._docket.sidedata_end
3291 sidedata_offset = self._docket.sidedata_end
3175 h, comp_sidedata = self._inner.compress(serialized_sidedata)
3292 h, comp_sidedata = self._inner.compress(serialized_sidedata)
3176 if (
3293 if (
3177 h != b'u'
3294 h != b'u'
3178 and comp_sidedata[0:1] != b'\0'
3295 and comp_sidedata[0:1] != b'\0'
3179 and len(comp_sidedata) < len(serialized_sidedata)
3296 and len(comp_sidedata) < len(serialized_sidedata)
3180 ):
3297 ):
3181 assert not h
3298 assert not h
3182 if (
3299 if (
3183 comp_sidedata[0:1]
3300 comp_sidedata[0:1]
3184 == self._docket.default_compression_header
3301 == self._docket.default_compression_header
3185 ):
3302 ):
3186 sidedata_compression_mode = COMP_MODE_DEFAULT
3303 sidedata_compression_mode = COMP_MODE_DEFAULT
3187 serialized_sidedata = comp_sidedata
3304 serialized_sidedata = comp_sidedata
3188 else:
3305 else:
3189 sidedata_compression_mode = COMP_MODE_INLINE
3306 sidedata_compression_mode = COMP_MODE_INLINE
3190 serialized_sidedata = comp_sidedata
3307 serialized_sidedata = comp_sidedata
3191 else:
3308 else:
3192 serialized_sidedata = b""
3309 serialized_sidedata = b""
3193 # Don't store the offset if the sidedata is empty, that way
3310 # Don't store the offset if the sidedata is empty, that way
3194 # we can easily detect empty sidedata, and they will be no different
3311 # we can easily detect empty sidedata, and they will be no different
3195 # from ones we add manually.
3312 # from ones we add manually.
3196 sidedata_offset = 0
3313 sidedata_offset = 0
3197
3314
3198 rank = RANK_UNKNOWN
3315 rank = RANK_UNKNOWN
3199 if self.feature_config.compute_rank:
3316 if self.feature_config.compute_rank:
3200 if (p1r, p2r) == (nullrev, nullrev):
3317 if (p1r, p2r) == (nullrev, nullrev):
3201 rank = 1
3318 rank = 1
3202 elif p1r != nullrev and p2r == nullrev:
3319 elif p1r != nullrev and p2r == nullrev:
3203 rank = 1 + self.fast_rank(p1r)
3320 rank = 1 + self.fast_rank(p1r)
3204 elif p1r == nullrev and p2r != nullrev:
3321 elif p1r == nullrev and p2r != nullrev:
3205 rank = 1 + self.fast_rank(p2r)
3322 rank = 1 + self.fast_rank(p2r)
3206 else: # merge node
3323 else: # merge node
3207 if rustdagop is not None and self.index.rust_ext_compat:
3324 if rustdagop is not None and self.index.rust_ext_compat:
3208 rank = rustdagop.rank(self.index, p1r, p2r)
3325 rank = rustdagop.rank(self.index, p1r, p2r)
3209 else:
3326 else:
3210 pmin, pmax = sorted((p1r, p2r))
3327 pmin, pmax = sorted((p1r, p2r))
3211 rank = 1 + self.fast_rank(pmax)
3328 rank = 1 + self.fast_rank(pmax)
3212 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
3329 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
3213
3330
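# Worked example for the rank computation above: rank(r) is the size of
# r's ancestor set, r included. A root (parents nullrev, nullrev) has
# rank 1; in a linear chain root -> a -> b, rank(b) = 1 + rank(a) = 3.
# For a merge, the findmissingrevs() walk adds the ancestors of pmin
# that were not already counted through pmax.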
3214 e = revlogutils.entry(
3331 e = revlogutils.entry(
3215 flags=flags,
3332 flags=flags,
3216 data_offset=offset,
3333 data_offset=offset,
3217 data_compressed_length=deltainfo.deltalen,
3334 data_compressed_length=deltainfo.deltalen,
3218 data_uncompressed_length=textlen,
3335 data_uncompressed_length=textlen,
3219 data_compression_mode=compression_mode,
3336 data_compression_mode=compression_mode,
3220 data_delta_base=deltainfo.base,
3337 data_delta_base=deltainfo.base,
3221 link_rev=link,
3338 link_rev=link,
3222 parent_rev_1=p1r,
3339 parent_rev_1=p1r,
3223 parent_rev_2=p2r,
3340 parent_rev_2=p2r,
3224 node_id=node,
3341 node_id=node,
3225 sidedata_offset=sidedata_offset,
3342 sidedata_offset=sidedata_offset,
3226 sidedata_compressed_length=len(serialized_sidedata),
3343 sidedata_compressed_length=len(serialized_sidedata),
3227 sidedata_compression_mode=sidedata_compression_mode,
3344 sidedata_compression_mode=sidedata_compression_mode,
3228 rank=rank,
3345 rank=rank,
3229 )
3346 )
3230
3347
3231 self.index.append(e)
3348 self.index.append(e)
3232 entry = self.index.entry_binary(curr)
3349 entry = self.index.entry_binary(curr)
3233 if curr == 0 and self._docket is None:
3350 if curr == 0 and self._docket is None:
3234 header = self._format_flags | self._format_version
3351 header = self._format_flags | self._format_version
3235 header = self.index.pack_header(header)
3352 header = self.index.pack_header(header)
3236 entry = header + entry
3353 entry = header + entry
3237 self._writeentry(
3354 self._writeentry(
3238 transaction,
3355 transaction,
3239 entry,
3356 entry,
3240 deltainfo.data,
3357 deltainfo.data,
3241 link,
3358 link,
3242 offset,
3359 offset,
3243 serialized_sidedata,
3360 serialized_sidedata,
3244 sidedata_offset,
3361 sidedata_offset,
3245 )
3362 )
3246
3363
3247 rawtext = btext[0]
3364 rawtext = btext[0]
3248
3365
3249 if alwayscache and rawtext is None:
3366 if alwayscache and rawtext is None:
3250 rawtext = deltacomputer.buildtext(revinfo)
3367 rawtext = deltacomputer.buildtext(revinfo)
3251
3368
3252 if type(rawtext) == bytes: # only accept immutable objects
3369 if type(rawtext) == bytes: # only accept immutable objects
3253 self._inner._revisioncache = (node, curr, rawtext)
3370 self._inner._revisioncache = (node, curr, rawtext)
3254 self._chainbasecache[curr] = deltainfo.chainbase
3371 self._chainbasecache[curr] = deltainfo.chainbase
3255 return curr
3372 return curr
3256
3373
3257 def _get_data_offset(self, prev):
3374 def _get_data_offset(self, prev):
3258 """Returns the current offset in the (in-transaction) data file.
3375 """Returns the current offset in the (in-transaction) data file.
3259 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
3376 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
3260 file to store that information: since sidedata can be rewritten to the
3377 file to store that information: since sidedata can be rewritten to the
3261 end of the data file within a transaction, you can have cases where, for
3378 end of the data file within a transaction, you can have cases where, for
3262 example, rev `n` does not have sidedata while rev `n - 1` does, leading
3379 example, rev `n` does not have sidedata while rev `n - 1` does, leading
3263 to `n - 1`'s sidedata being written after `n`'s data.
3380 to `n - 1`'s sidedata being written after `n`'s data.
3264
3381
3265 TODO cache this in a docket file before getting out of experimental."""
3382 TODO cache this in a docket file before getting out of experimental."""
3266 if self._docket is None:
3383 if self._docket is None:
3267 return self.end(prev)
3384 return self.end(prev)
3268 else:
3385 else:
3269 return self._docket.data_end
3386 return self._docket.data_end
3270
3387
3271 def _writeentry(
3388 def _writeentry(
3272 self,
3389 self,
3273 transaction,
3390 transaction,
3274 entry,
3391 entry,
3275 data,
3392 data,
3276 link,
3393 link,
3277 offset,
3394 offset,
3278 sidedata,
3395 sidedata,
3279 sidedata_offset,
3396 sidedata_offset,
3280 ):
3397 ):
3281 # Files opened in a+ mode have inconsistent behavior on various
3398 # Files opened in a+ mode have inconsistent behavior on various
3282 # platforms. Windows requires that a file positioning call be made
3399 # platforms. Windows requires that a file positioning call be made
3283 # when the file handle transitions between reads and writes. See
3400 # when the file handle transitions between reads and writes. See
3284 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
3401 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
3285 # platforms, Python or the platform itself can be buggy. Some versions
3402 # platforms, Python or the platform itself can be buggy. Some versions
3286 # of Solaris have been observed to not append at the end of the file
3403 # of Solaris have been observed to not append at the end of the file
3287 # if the file was seeked to before the end. See issue4943 for more.
3404 # if the file was seeked to before the end. See issue4943 for more.
3288 #
3405 #
3289 # We work around this issue by inserting a seek() before writing.
3406 # We work around this issue by inserting a seek() before writing.
3290 # Note: This is likely not necessary on Python 3. However, because
3407 # Note: This is likely not necessary on Python 3. However, because
3291 # the file handle is reused for reads and may be seeked there, we need
3408 # the file handle is reused for reads and may be seeked there, we need
3292 # to be careful before changing this.
3409 # to be careful before changing this.
3293 index_end = data_end = sidedata_end = None
3410 index_end = data_end = sidedata_end = None
3294 if self._docket is not None:
3411 if self._docket is not None:
3295 index_end = self._docket.index_end
3412 index_end = self._docket.index_end
3296 data_end = self._docket.data_end
3413 data_end = self._docket.data_end
3297 sidedata_end = self._docket.sidedata_end
3414 sidedata_end = self._docket.sidedata_end
3298
3415
3299 files_end = self._inner.write_entry(
3416 files_end = self._inner.write_entry(
3300 transaction,
3417 transaction,
3301 entry,
3418 entry,
3302 data,
3419 data,
3303 link,
3420 link,
3304 offset,
3421 offset,
3305 sidedata,
3422 sidedata,
3306 sidedata_offset,
3423 sidedata_offset,
3307 index_end,
3424 index_end,
3308 data_end,
3425 data_end,
3309 sidedata_end,
3426 sidedata_end,
3310 )
3427 )
3311 self._enforceinlinesize(transaction)
3428 self._enforceinlinesize(transaction)
3312 if self._docket is not None:
3429 if self._docket is not None:
3313 self._docket.index_end = files_end[0]
3430 self._docket.index_end = files_end[0]
3314 self._docket.data_end = files_end[1]
3431 self._docket.data_end = files_end[1]
3315 self._docket.sidedata_end = files_end[2]
3432 self._docket.sidedata_end = files_end[2]
3316
3433
3317 nodemaputil.setup_persistent_nodemap(transaction, self)
3434 nodemaputil.setup_persistent_nodemap(transaction, self)
3318
3435
3319 def addgroup(
3436 def addgroup(
3320 self,
3437 self,
3321 deltas,
3438 deltas,
3322 linkmapper,
3439 linkmapper,
3323 transaction,
3440 transaction,
3324 alwayscache=False,
3441 alwayscache=False,
3325 addrevisioncb=None,
3442 addrevisioncb=None,
3326 duplicaterevisioncb=None,
3443 duplicaterevisioncb=None,
3327 debug_info=None,
3444 debug_info=None,
3328 delta_base_reuse_policy=None,
3445 delta_base_reuse_policy=None,
3329 ):
3446 ):
3330 """
3447 """
3331 add a delta group
3448 add a delta group
3332
3449
3333 Given a set of deltas, add them to the revision log. The
3450 Given a set of deltas, add them to the revision log. The
3334 first delta is against its parent, which should be in our
3451 first delta is against its parent, which should be in our
3335 log; the rest are against the previous delta.
3452 log; the rest are against the previous delta.
3336
3453
3337 If ``addrevisioncb`` is defined, it will be called with arguments of
3454 If ``addrevisioncb`` is defined, it will be called with arguments of
3338 this revlog and the node that was added.
3455 this revlog and the node that was added.
3339 """
3456 """
3340
3457
3341 if self._adding_group:
3458 if self._adding_group:
3342 raise error.ProgrammingError(b'cannot nest addgroup() calls')
3459 raise error.ProgrammingError(b'cannot nest addgroup() calls')
3343
3460
3344 # read the default delta-base reuse policy from revlog config if the
3461 # read the default delta-base reuse policy from revlog config if the
3345 # group did not specify one.
3462 # group did not specify one.
3346 if delta_base_reuse_policy is None:
3463 if delta_base_reuse_policy is None:
3347 if (
3464 if (
3348 self.delta_config.general_delta
3465 self.delta_config.general_delta
3349 and self.delta_config.lazy_delta_base
3466 and self.delta_config.lazy_delta_base
3350 ):
3467 ):
3351 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
3468 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
3352 else:
3469 else:
3353 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
3470 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
3354
3471
3355 self._adding_group = True
3472 self._adding_group = True
3356 empty = True
3473 empty = True
3357 try:
3474 try:
3358 with self._writing(transaction):
3475 with self._writing(transaction):
3359 write_debug = None
3476 write_debug = None
3360 if self.delta_config.debug_delta:
3477 if self.delta_config.debug_delta:
3361 write_debug = transaction._report
3478 write_debug = transaction._report
3362 deltacomputer = deltautil.deltacomputer(
3479 deltacomputer = deltautil.deltacomputer(
3363 self,
3480 self,
3364 write_debug=write_debug,
3481 write_debug=write_debug,
3365 debug_info=debug_info,
3482 debug_info=debug_info,
3366 )
3483 )
3367 # loop through our set of deltas
3484 # loop through our set of deltas
3368 for data in deltas:
3485 for data in deltas:
3369 (
3486 (
3370 node,
3487 node,
3371 p1,
3488 p1,
3372 p2,
3489 p2,
3373 linknode,
3490 linknode,
3374 deltabase,
3491 deltabase,
3375 delta,
3492 delta,
3376 flags,
3493 flags,
3377 sidedata,
3494 sidedata,
3378 ) = data
3495 ) = data
3379 link = linkmapper(linknode)
3496 link = linkmapper(linknode)
3380 flags = flags or REVIDX_DEFAULT_FLAGS
3497 flags = flags or REVIDX_DEFAULT_FLAGS
3381
3498
3382 rev = self.index.get_rev(node)
3499 rev = self.index.get_rev(node)
3383 if rev is not None:
3500 if rev is not None:
3384 # this can happen if two branches make the same change
3501 # this can happen if two branches make the same change
3385 self._nodeduplicatecallback(transaction, rev)
3502 self._nodeduplicatecallback(transaction, rev)
3386 if duplicaterevisioncb:
3503 if duplicaterevisioncb:
3387 duplicaterevisioncb(self, rev)
3504 duplicaterevisioncb(self, rev)
3388 empty = False
3505 empty = False
3389 continue
3506 continue
3390
3507
3391 for p in (p1, p2):
3508 for p in (p1, p2):
3392 if not self.index.has_node(p):
3509 if not self.index.has_node(p):
3393 raise error.LookupError(
3510 raise error.LookupError(
3394 p, self.radix, _(b'unknown parent')
3511 p, self.radix, _(b'unknown parent')
3395 )
3512 )
3396
3513
3397 if not self.index.has_node(deltabase):
3514 if not self.index.has_node(deltabase):
3398 raise error.LookupError(
3515 raise error.LookupError(
3399 deltabase, self.display_id, _(b'unknown delta base')
3516 deltabase, self.display_id, _(b'unknown delta base')
3400 )
3517 )
3401
3518
3402 baserev = self.rev(deltabase)
3519 baserev = self.rev(deltabase)
3403
3520
3404 if baserev != nullrev and self.iscensored(baserev):
3521 if baserev != nullrev and self.iscensored(baserev):
3405 # if base is censored, delta must be full replacement in a
3522 # if base is censored, delta must be full replacement in a
3406 # single patch operation
3523 # single patch operation
3407 hlen = struct.calcsize(b">lll")
3524 hlen = struct.calcsize(b">lll")
3408 oldlen = self.rawsize(baserev)
3525 oldlen = self.rawsize(baserev)
3409 newlen = len(delta) - hlen
3526 newlen = len(delta) - hlen
3410 if delta[:hlen] != mdiff.replacediffheader(
3527 if delta[:hlen] != mdiff.replacediffheader(
3411 oldlen, newlen
3528 oldlen, newlen
3412 ):
3529 ):
3413 raise error.CensoredBaseError(
3530 raise error.CensoredBaseError(
3414 self.display_id, self.node(baserev)
3531 self.display_id, self.node(baserev)
3415 )
3532 )
3416
3533
3417 if not flags and self._peek_iscensored(baserev, delta):
3534 if not flags and self._peek_iscensored(baserev, delta):
3418 flags |= REVIDX_ISCENSORED
3535 flags |= REVIDX_ISCENSORED
3419
3536
3420 # We assume consumers of addrevisioncb will want to retrieve
3537 # We assume consumers of addrevisioncb will want to retrieve
3421 # the added revision, which will require a call to
3538 # the added revision, which will require a call to
3422 # revision(). revision() will fast path if there is a cache
3539 # revision(). revision() will fast path if there is a cache
3423 # hit. So, we tell _addrevision() to always cache in this case.
3540 # hit. So, we tell _addrevision() to always cache in this case.
3424 # We're only using addgroup() in the context of changegroup
3541 # We're only using addgroup() in the context of changegroup
3425 # generation so the revision data can always be handled as raw
3542 # generation so the revision data can always be handled as raw
3426 # by the flagprocessor.
3543 # by the flagprocessor.
3427 rev = self._addrevision(
3544 rev = self._addrevision(
3428 node,
3545 node,
3429 None,
3546 None,
3430 transaction,
3547 transaction,
3431 link,
3548 link,
3432 p1,
3549 p1,
3433 p2,
3550 p2,
3434 flags,
3551 flags,
3435 (baserev, delta, delta_base_reuse_policy),
3552 (baserev, delta, delta_base_reuse_policy),
3436 alwayscache=alwayscache,
3553 alwayscache=alwayscache,
3437 deltacomputer=deltacomputer,
3554 deltacomputer=deltacomputer,
3438 sidedata=sidedata,
3555 sidedata=sidedata,
3439 )
3556 )
3440
3557
3441 if addrevisioncb:
3558 if addrevisioncb:
3442 addrevisioncb(self, rev)
3559 addrevisioncb(self, rev)
3443 empty = False
3560 empty = False
3444 finally:
3561 finally:
3445 self._adding_group = False
3562 self._adding_group = False
3446 return not empty
3563 return not empty
3447
3564
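# Shape of the input consumed by addgroup() (the 8-tuple unpacked in
# the loop above):
#
#     (node, p1, p2, linknode, deltabase, delta, flags, sidedata)
#
# `linkmapper` translates the changeset node each delta is bound to
# into a local linkrev, e.g. a hypothetical `lambda n: cl.rev(n)` when
# the deltas come from a changegroup.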
3448 def iscensored(self, rev):
3565 def iscensored(self, rev):
3449 """Check if a file revision is censored."""
3566 """Check if a file revision is censored."""
3450 if not self.feature_config.censorable:
3567 if not self.feature_config.censorable:
3451 return False
3568 return False
3452
3569
3453 return self.flags(rev) & REVIDX_ISCENSORED
3570 return self.flags(rev) & REVIDX_ISCENSORED
3454
3571
3455 def _peek_iscensored(self, baserev, delta):
3572 def _peek_iscensored(self, baserev, delta):
3456 """Quickly check if a delta produces a censored revision."""
3573 """Quickly check if a delta produces a censored revision."""
3457 if not self.feature_config.censorable:
3574 if not self.feature_config.censorable:
3458 return False
3575 return False
3459
3576
3460 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
3577 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
3461
3578
3462 def getstrippoint(self, minlink):
3579 def getstrippoint(self, minlink):
3463 """find the minimum rev that must be stripped to strip the linkrev
3580 """find the minimum rev that must be stripped to strip the linkrev
3464
3581
3465 Returns a tuple containing the minimum rev and a set of all revs that
3582 Returns a tuple containing the minimum rev and a set of all revs that
3466 have linkrevs that will be broken by this strip.
3583 have linkrevs that will be broken by this strip.
3467 """
3584 """
3468 return storageutil.resolvestripinfo(
3585 return storageutil.resolvestripinfo(
3469 minlink,
3586 minlink,
3470 len(self) - 1,
3587 len(self) - 1,
3471 self.headrevs(),
3588 self.headrevs(),
3472 self.linkrev,
3589 self.linkrev,
3473 self.parentrevs,
3590 self.parentrevs,
3474 )
3591 )
3475
3592
3476 def strip(self, minlink, transaction):
3593 def strip(self, minlink, transaction):
3477 """truncate the revlog on the first revision with a linkrev >= minlink
3594 """truncate the revlog on the first revision with a linkrev >= minlink
3478
3595
3479 This function is called when we're stripping revision minlink and
3596 This function is called when we're stripping revision minlink and
3480 its descendants from the repository.
3597 its descendants from the repository.
3481
3598
3482 We have to remove all revisions with linkrev >= minlink, because
3599 We have to remove all revisions with linkrev >= minlink, because
3483 the equivalent changelog revisions will be renumbered after the
3600 the equivalent changelog revisions will be renumbered after the
3484 strip.
3601 strip.
3485
3602
3486 So we truncate the revlog on the first of these revisions, and
3603 So we truncate the revlog on the first of these revisions, and
3487 trust that the caller has saved the revisions that shouldn't be
3604 trust that the caller has saved the revisions that shouldn't be
3488 removed and that it'll re-add them after this truncation.
3605 removed and that it'll re-add them after this truncation.
3489 """
3606 """
3490 if len(self) == 0:
3607 if len(self) == 0:
3491 return
3608 return
3492
3609
3493 rev, _ = self.getstrippoint(minlink)
3610 rev, _ = self.getstrippoint(minlink)
3494 if rev == len(self):
3611 if rev == len(self):
3495 return
3612 return
3496
3613
3497 # first truncate the files on disk
3614 # first truncate the files on disk
3498 data_end = self.start(rev)
3615 data_end = self.start(rev)
3499 if not self._inline:
3616 if not self._inline:
3500 transaction.add(self._datafile, data_end)
3617 transaction.add(self._datafile, data_end)
3501 end = rev * self.index.entry_size
3618 end = rev * self.index.entry_size
3502 else:
3619 else:
3503 end = data_end + (rev * self.index.entry_size)
3620 end = data_end + (rev * self.index.entry_size)
3504
3621
3505 if self._sidedatafile:
3622 if self._sidedatafile:
3506 sidedata_end = self.sidedata_cut_off(rev)
3623 sidedata_end = self.sidedata_cut_off(rev)
3507 transaction.add(self._sidedatafile, sidedata_end)
3624 transaction.add(self._sidedatafile, sidedata_end)
3508
3625
3509 transaction.add(self._indexfile, end)
3626 transaction.add(self._indexfile, end)
3510 if self._docket is not None:
3627 if self._docket is not None:
3511 # XXX we could leverage the docket while stripping. However, it is
3628 # XXX we could leverage the docket while stripping. However, it is
3512 # not powerful enough at the time of this comment
3629 # not powerful enough at the time of this comment
3513 self._docket.index_end = end
3630 self._docket.index_end = end
3514 self._docket.data_end = data_end
3631 self._docket.data_end = data_end
3515 self._docket.sidedata_end = sidedata_end
3632 self._docket.sidedata_end = sidedata_end
3516 self._docket.write(transaction, stripping=True)
3633 self._docket.write(transaction, stripping=True)
3517
3634
3518 # then reset internal state in memory to forget those revisions
3635 # then reset internal state in memory to forget those revisions
3519 self._chaininfocache = util.lrucachedict(500)
3636 self._chaininfocache = util.lrucachedict(500)
3520 self._inner.clear_cache()
3637 self._inner.clear_cache()
3521
3638
3522 del self.index[rev:-1]
3639 del self.index[rev:-1]
3523
3640
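# Worked example (hedged): with linkrevs [0, 1, 4, 2, 3] for revs 0-4
# and minlink=2, getstrippoint(2) picks rev 2, the first revision whose
# linkrev is >= 2, and strip() truncates the files from there; revs 3
# and 4 (linkrevs 2 and 3) are removed as well, which is why the caller
# must save and re-add any revision that should survive.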
3524 def checksize(self):
3641 def checksize(self):
3525 """Check size of index and data files
3642 """Check size of index and data files
3526
3643
3527 return a (dd, di) tuple.
3644 return a (dd, di) tuple.
3528 - dd: extra bytes for the "data" file
3645 - dd: extra bytes for the "data" file
3529 - di: extra bytes for the "index" file
3646 - di: extra bytes for the "index" file
3530
3647
3531 A healthy revlog will return (0, 0).
3648 A healthy revlog will return (0, 0).
3532 """
3649 """
3533 expected = 0
3650 expected = 0
3534 if len(self):
3651 if len(self):
3535 expected = max(0, self.end(len(self) - 1))
3652 expected = max(0, self.end(len(self) - 1))
3536
3653
3537 try:
3654 try:
3538 with self._datafp() as f:
3655 with self._datafp() as f:
3539 f.seek(0, io.SEEK_END)
3656 f.seek(0, io.SEEK_END)
3540 actual = f.tell()
3657 actual = f.tell()
3541 dd = actual - expected
3658 dd = actual - expected
3542 except FileNotFoundError:
3659 except FileNotFoundError:
3543 dd = 0
3660 dd = 0
3544
3661
3545 try:
3662 try:
3546 f = self.opener(self._indexfile)
3663 f = self.opener(self._indexfile)
3547 f.seek(0, io.SEEK_END)
3664 f.seek(0, io.SEEK_END)
3548 actual = f.tell()
3665 actual = f.tell()
3549 f.close()
3666 f.close()
3550 s = self.index.entry_size
3667 s = self.index.entry_size
3551 i = max(0, actual // s)
3668 i = max(0, actual // s)
3552 di = actual - (i * s)
3669 di = actual - (i * s)
3553 if self._inline:
3670 if self._inline:
3554 databytes = 0
3671 databytes = 0
3555 for r in self:
3672 for r in self:
3556 databytes += max(0, self.length(r))
3673 databytes += max(0, self.length(r))
3557 dd = 0
3674 dd = 0
3558 di = actual - len(self) * s - databytes
3675 di = actual - len(self) * s - databytes
3559 except FileNotFoundError:
3676 except FileNotFoundError:
3560 di = 0
3677 di = 0
3561
3678
3562 return (dd, di)
3679 return (dd, di)
3563
3680
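# Interpreting the result (a sketch with a hypothetical `ui`): di > 0
# usually means trailing bytes that do not form a whole index entry,
# while dd > 0 means the data file is longer than the index accounts
# for:
#
#     dd, di = rl.checksize()
#     if dd or di:
#         ui.warn(b'%s: %d/%d stray data/index bytes\n'
#                 % (rl.display_id, dd, di))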
3564 def files(self):
3681 def files(self):
3565 """return list of files that compose this revlog"""
3682 """return list of files that compose this revlog"""
3566 res = [self._indexfile]
3683 res = [self._indexfile]
3567 if self._docket_file is None:
3684 if self._docket_file is None:
3568 if not self._inline:
3685 if not self._inline:
3569 res.append(self._datafile)
3686 res.append(self._datafile)
3570 else:
3687 else:
3571 res.append(self._docket_file)
3688 res.append(self._docket_file)
3572 res.extend(self._docket.old_index_filepaths(include_empty=False))
3689 res.extend(self._docket.old_index_filepaths(include_empty=False))
3573 if self._docket.data_end:
3690 if self._docket.data_end:
3574 res.append(self._datafile)
3691 res.append(self._datafile)
3575 res.extend(self._docket.old_data_filepaths(include_empty=False))
3692 res.extend(self._docket.old_data_filepaths(include_empty=False))
3576 if self._docket.sidedata_end:
3693 if self._docket.sidedata_end:
3577 res.append(self._sidedatafile)
3694 res.append(self._sidedatafile)
3578 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
3695 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
3579 return res
3696 return res
3580
3697
3581 def emitrevisions(
3698 def emitrevisions(
3582 self,
3699 self,
3583 nodes,
3700 nodes,
3584 nodesorder=None,
3701 nodesorder=None,
3585 revisiondata=False,
3702 revisiondata=False,
3586 assumehaveparentrevisions=False,
3703 assumehaveparentrevisions=False,
3587 deltamode=repository.CG_DELTAMODE_STD,
3704 deltamode=repository.CG_DELTAMODE_STD,
3588 sidedata_helpers=None,
3705 sidedata_helpers=None,
3589 debug_info=None,
3706 debug_info=None,
3590 ):
3707 ):
3591 if nodesorder not in (b'nodes', b'storage', b'linear', None):
3708 if nodesorder not in (b'nodes', b'storage', b'linear', None):
3592 raise error.ProgrammingError(
3709 raise error.ProgrammingError(
3593 b'unhandled value for nodesorder: %s' % nodesorder
3710 b'unhandled value for nodesorder: %s' % nodesorder
3594 )
3711 )
3595
3712
3596 if nodesorder is None and not self.delta_config.general_delta:
3713 if nodesorder is None and not self.delta_config.general_delta:
3597 nodesorder = b'storage'
3714 nodesorder = b'storage'
3598
3715
3599 if (
3716 if (
3600 not self._storedeltachains
3717 not self._storedeltachains
3601 and deltamode != repository.CG_DELTAMODE_PREV
3718 and deltamode != repository.CG_DELTAMODE_PREV
3602 ):
3719 ):
3603 deltamode = repository.CG_DELTAMODE_FULL
3720 deltamode = repository.CG_DELTAMODE_FULL
3604
3721
3605 return storageutil.emitrevisions(
3722 return storageutil.emitrevisions(
3606 self,
3723 self,
3607 nodes,
3724 nodes,
3608 nodesorder,
3725 nodesorder,
3609 revlogrevisiondelta,
3726 revlogrevisiondelta,
3610 deltaparentfn=self.deltaparent,
3727 deltaparentfn=self.deltaparent,
3611 candeltafn=self._candelta,
3728 candeltafn=self._candelta,
3612 rawsizefn=self.rawsize,
3729 rawsizefn=self.rawsize,
3613 revdifffn=self.revdiff,
3730 revdifffn=self.revdiff,
3614 flagsfn=self.flags,
3731 flagsfn=self.flags,
3615 deltamode=deltamode,
3732 deltamode=deltamode,
3616 revisiondata=revisiondata,
3733 revisiondata=revisiondata,
3617 assumehaveparentrevisions=assumehaveparentrevisions,
3734 assumehaveparentrevisions=assumehaveparentrevisions,
3618 sidedata_helpers=sidedata_helpers,
3735 sidedata_helpers=sidedata_helpers,
3619 debug_info=debug_info,
3736 debug_info=debug_info,
3620 )
3737 )
3621
3738
3622 DELTAREUSEALWAYS = b'always'
3739 DELTAREUSEALWAYS = b'always'
3623 DELTAREUSESAMEREVS = b'samerevs'
3740 DELTAREUSESAMEREVS = b'samerevs'
3624 DELTAREUSENEVER = b'never'
3741 DELTAREUSENEVER = b'never'
3625
3742
3626 DELTAREUSEFULLADD = b'fulladd'
3743 DELTAREUSEFULLADD = b'fulladd'
3627
3744
3628 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
3745 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
3629
3746
3630 def clone(
3747 def clone(
3631 self,
3748 self,
3632 tr,
3749 tr,
3633 destrevlog,
3750 destrevlog,
3634 addrevisioncb=None,
3751 addrevisioncb=None,
3635 deltareuse=DELTAREUSESAMEREVS,
3752 deltareuse=DELTAREUSESAMEREVS,
3636 forcedeltabothparents=None,
3753 forcedeltabothparents=None,
3637 sidedata_helpers=None,
3754 sidedata_helpers=None,
3638 ):
3755 ):
3639 """Copy this revlog to another, possibly with format changes.
3756 """Copy this revlog to another, possibly with format changes.
3640
3757
3641 The destination revlog will contain the same revisions and nodes.
3758 The destination revlog will contain the same revisions and nodes.
3642 However, it may not be bit-for-bit identical due to e.g. delta encoding
3759 However, it may not be bit-for-bit identical due to e.g. delta encoding
3643 differences.
3760 differences.
3644
3761
3645 The ``deltareuse`` argument controls how deltas from the existing revlog
3762 The ``deltareuse`` argument controls how deltas from the existing revlog
3646 are preserved in the destination revlog. The argument can have the
3763 are preserved in the destination revlog. The argument can have the
3647 following values:
3764 following values:
3648
3765
3649 DELTAREUSEALWAYS
3766 DELTAREUSEALWAYS
3650 Deltas will always be reused (if possible), even if the destination
3767 Deltas will always be reused (if possible), even if the destination
3651 revlog would not select the same revisions for the delta. This is the
3768 revlog would not select the same revisions for the delta. This is the
3652 fastest mode of operation.
3769 fastest mode of operation.
3653 DELTAREUSESAMEREVS
3770 DELTAREUSESAMEREVS
3654 Deltas will be reused if the destination revlog would pick the same
3771 Deltas will be reused if the destination revlog would pick the same
3655 revisions for the delta. This mode strikes a balance between speed
3772 revisions for the delta. This mode strikes a balance between speed
3656 and optimization.
3773 and optimization.
3657 DELTAREUSENEVER
3774 DELTAREUSENEVER
3658 Deltas will never be reused. This is the slowest mode of execution.
3775 Deltas will never be reused. This is the slowest mode of execution.
3659 This mode can be used to recompute deltas (e.g. if the diff/delta
3776 This mode can be used to recompute deltas (e.g. if the diff/delta
3660 algorithm changes).
3777 algorithm changes).
3661 DELTAREUSEFULLADD
3778 DELTAREUSEFULLADD
3662 Revisions will be re-added as if they were new content. This is
3779 Revisions will be re-added as if they were new content. This is
3663 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
3780 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
3664 e.g. large file detection and handling.
3781 e.g. large file detection and handling.
3665
3782
3666 Delta computation can be slow, so the choice of delta reuse policy can
3783 Delta computation can be slow, so the choice of delta reuse policy can
3667 significantly affect run time.
3784 significantly affect run time.
3668
3785
3669 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
3786 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
3670 two extremes. Deltas will be reused if they are appropriate. But if the
3787 two extremes. Deltas will be reused if they are appropriate. But if the
3671 delta could choose a better revision, it will do so. This means if you
3788 delta could choose a better revision, it will do so. This means if you
3672 are converting a non-generaldelta revlog to a generaldelta revlog,
3789 are converting a non-generaldelta revlog to a generaldelta revlog,
3673 deltas will be recomputed if the delta's parent isn't a parent of the
3790 deltas will be recomputed if the delta's parent isn't a parent of the
3674 revision.
3791 revision.
3675
3792
3676 In addition to the delta policy, the ``forcedeltabothparents``
3793 In addition to the delta policy, the ``forcedeltabothparents``
3677 argument controls whether to force computing deltas against both parents
3794 argument controls whether to force computing deltas against both parents
3678 for merges. If unset, the destination revlog's current setting is used.
3795 for merges. If unset, the destination revlog's current setting is used.
3679
3796
3680 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
3797 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
3681 `sidedata_helpers`.
3798 `sidedata_helpers`.
3682 """
3799 """
3683 if deltareuse not in self.DELTAREUSEALL:
3800 if deltareuse not in self.DELTAREUSEALL:
3684 raise ValueError(
3801 raise ValueError(
3685 _(b'value for deltareuse invalid: %s') % deltareuse
3802 _(b'value for deltareuse invalid: %s') % deltareuse
3686 )
3803 )
3687
3804
3688 if len(destrevlog):
3805 if len(destrevlog):
3689 raise ValueError(_(b'destination revlog is not empty'))
3806 raise ValueError(_(b'destination revlog is not empty'))
3690
3807
3691 if getattr(self, 'filteredrevs', None):
3808 if getattr(self, 'filteredrevs', None):
3692 raise ValueError(_(b'source revlog has filtered revisions'))
3809 raise ValueError(_(b'source revlog has filtered revisions'))
3693 if getattr(destrevlog, 'filteredrevs', None):
3810 if getattr(destrevlog, 'filteredrevs', None):
3694 raise ValueError(_(b'destination revlog has filtered revisions'))
3811 raise ValueError(_(b'destination revlog has filtered revisions'))
3695
3812
3696 # lazydelta and lazydeltabase control whether to reuse a cached delta,
3813 # lazydelta and lazydeltabase control whether to reuse a cached delta,
3697 # if possible.
3814 # if possible.
3698 old_delta_config = destrevlog.delta_config
3815 old_delta_config = destrevlog.delta_config
3699 destrevlog.delta_config = destrevlog.delta_config.copy()
3816 destrevlog.delta_config = destrevlog.delta_config.copy()
3700
3817
3701 try:
3818 try:
3702 if deltareuse == self.DELTAREUSEALWAYS:
3819 if deltareuse == self.DELTAREUSEALWAYS:
3703 destrevlog.delta_config.lazy_delta_base = True
3820 destrevlog.delta_config.lazy_delta_base = True
3704 destrevlog.delta_config.lazy_delta = True
3821 destrevlog.delta_config.lazy_delta = True
3705 elif deltareuse == self.DELTAREUSESAMEREVS:
3822 elif deltareuse == self.DELTAREUSESAMEREVS:
3706 destrevlog.delta_config.lazy_delta_base = False
3823 destrevlog.delta_config.lazy_delta_base = False
3707 destrevlog.delta_config.lazy_delta = True
3824 destrevlog.delta_config.lazy_delta = True
3708 elif deltareuse == self.DELTAREUSENEVER:
3825 elif deltareuse == self.DELTAREUSENEVER:
3709 destrevlog.delta_config.lazy_delta_base = False
3826 destrevlog.delta_config.lazy_delta_base = False
3710 destrevlog.delta_config.lazy_delta = False
3827 destrevlog.delta_config.lazy_delta = False
3711
3828
3712 delta_both_parents = (
3829 delta_both_parents = (
3713 forcedeltabothparents or old_delta_config.delta_both_parents
3830 forcedeltabothparents or old_delta_config.delta_both_parents
3714 )
3831 )
3715 destrevlog.delta_config.delta_both_parents = delta_both_parents
3832 destrevlog.delta_config.delta_both_parents = delta_both_parents
3716
3833
3717 with self.reading(), destrevlog._writing(tr):
3834 with self.reading(), destrevlog._writing(tr):
3718 self._clone(
3835 self._clone(
3719 tr,
3836 tr,
3720 destrevlog,
3837 destrevlog,
3721 addrevisioncb,
3838 addrevisioncb,
3722 deltareuse,
3839 deltareuse,
3723 forcedeltabothparents,
3840 forcedeltabothparents,
3724 sidedata_helpers,
3841 sidedata_helpers,
3725 )
3842 )
3726
3843
3727 finally:
3844 finally:
3728 destrevlog.delta_config = old_delta_config
3845 destrevlog.delta_config = old_delta_config
3729
3846
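# A minimal usage sketch (hypothetical setup; `dst` must be empty):
# recomputing every delta while converting a revlog, e.g. during an
# upgrade:
#
#     with repo.transaction(b'rewrite') as tr:
#         src.clone(tr, dst, deltareuse=src.DELTAREUSENEVER)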
3730 def _clone(
3847 def _clone(
3731 self,
3848 self,
3732 tr,
3849 tr,
3733 destrevlog,
3850 destrevlog,
3734 addrevisioncb,
3851 addrevisioncb,
3735 deltareuse,
3852 deltareuse,
3736 forcedeltabothparents,
3853 forcedeltabothparents,
3737 sidedata_helpers,
3854 sidedata_helpers,
3738 ):
3855 ):
3739 """perform the core duty of `revlog.clone` after parameter processing"""
3856 """perform the core duty of `revlog.clone` after parameter processing"""
3740 write_debug = None
3857 write_debug = None
3741 if self.delta_config.debug_delta:
3858 if self.delta_config.debug_delta:
3742 write_debug = tr._report
3859 write_debug = tr._report
3743 deltacomputer = deltautil.deltacomputer(
3860 deltacomputer = deltautil.deltacomputer(
3744 destrevlog,
3861 destrevlog,
3745 write_debug=write_debug,
3862 write_debug=write_debug,
3746 )
3863 )
3747 index = self.index
3864 index = self.index
3748 for rev in self:
3865 for rev in self:
3749 entry = index[rev]
3866 entry = index[rev]
3750
3867
3751 # Some classes override linkrev to take filtered revs into
3868 # Some classes override linkrev to take filtered revs into
3752 # account. Use raw entry from index.
3869 # account. Use raw entry from index.
3753 flags = entry[0] & 0xFFFF
3870 flags = entry[0] & 0xFFFF
3754 linkrev = entry[4]
3871 linkrev = entry[4]
3755 p1 = index[entry[5]][7]
3872 p1 = index[entry[5]][7]
3756 p2 = index[entry[6]][7]
3873 p2 = index[entry[6]][7]
3757 node = entry[7]
3874 node = entry[7]
3758
3875
3759 # (Possibly) reuse the delta from the revlog if allowed and
3876 # (Possibly) reuse the delta from the revlog if allowed and
3760 # the revlog chunk is a delta.
3877 # the revlog chunk is a delta.
3761 cachedelta = None
3878 cachedelta = None
3762 rawtext = None
3879 rawtext = None
3763 if deltareuse == self.DELTAREUSEFULLADD:
3880 if deltareuse == self.DELTAREUSEFULLADD:
3764 text = self._revisiondata(rev)
3881 text = self._revisiondata(rev)
3765 sidedata = self.sidedata(rev)
3882 sidedata = self.sidedata(rev)
3766
3883
3767 if sidedata_helpers is not None:
3884 if sidedata_helpers is not None:
3768 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3885 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3769 self, sidedata_helpers, sidedata, rev
3886 self, sidedata_helpers, sidedata, rev
3770 )
3887 )
3771 flags = flags | new_flags[0] & ~new_flags[1]
3888 flags = flags | new_flags[0] & ~new_flags[1]
3772
3889
3773 destrevlog.addrevision(
3890 destrevlog.addrevision(
3774 text,
3891 text,
3775 tr,
3892 tr,
3776 linkrev,
3893 linkrev,
3777 p1,
3894 p1,
3778 p2,
3895 p2,
3779 cachedelta=cachedelta,
3896 cachedelta=cachedelta,
3780 node=node,
3897 node=node,
3781 flags=flags,
3898 flags=flags,
3782 deltacomputer=deltacomputer,
3899 deltacomputer=deltacomputer,
3783 sidedata=sidedata,
3900 sidedata=sidedata,
3784 )
3901 )
3785 else:
3902 else:
3786 if destrevlog.delta_config.lazy_delta:
3903 if destrevlog.delta_config.lazy_delta:
3787 dp = self.deltaparent(rev)
3904 dp = self.deltaparent(rev)
3788 if dp != nullrev:
3905 if dp != nullrev:
3789 cachedelta = (dp, bytes(self._inner._chunk(rev)))
3906 cachedelta = (dp, bytes(self._inner._chunk(rev)))
3790
3907
3791 sidedata = None
3908 sidedata = None
3792 if not cachedelta:
3909 if not cachedelta:
3793 try:
3910 try:
3794 rawtext = self._revisiondata(rev)
3911 rawtext = self._revisiondata(rev)
3795 except error.CensoredNodeError as censored:
3912 except error.CensoredNodeError as censored:
3796 assert flags & REVIDX_ISCENSORED
3913 assert flags & REVIDX_ISCENSORED
3797 rawtext = censored.tombstone
3914 rawtext = censored.tombstone
3798 sidedata = self.sidedata(rev)
3915 sidedata = self.sidedata(rev)
3799 if sidedata is None:
3916 if sidedata is None:
3800 sidedata = self.sidedata(rev)
3917 sidedata = self.sidedata(rev)
3801
3918
3802 if sidedata_helpers is not None:
3919 if sidedata_helpers is not None:
3803 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3920 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3804 self, sidedata_helpers, sidedata, rev
3921 self, sidedata_helpers, sidedata, rev
3805 )
3922 )
3806 flags = flags | new_flags[0] & ~new_flags[1]
3923 flags = flags | new_flags[0] & ~new_flags[1]
3807
3924
3808 destrevlog._addrevision(
3925 destrevlog._addrevision(
3809 node,
3926 node,
3810 rawtext,
3927 rawtext,
3811 tr,
3928 tr,
3812 linkrev,
3929 linkrev,
3813 p1,
3930 p1,
3814 p2,
3931 p2,
3815 flags,
3932 flags,
3816 cachedelta,
3933 cachedelta,
3817 deltacomputer=deltacomputer,
3934 deltacomputer=deltacomputer,
3818 sidedata=sidedata,
3935 sidedata=sidedata,
3819 )
3936 )
3820
3937
3821 if addrevisioncb:
3938 if addrevisioncb:
3822 addrevisioncb(self, rev, node)
3939 addrevisioncb(self, rev, node)
3823
3940
3824 def censorrevision(self, tr, censornode, tombstone=b''):
3941 def censorrevision(self, tr, censornode, tombstone=b''):
3825 if self._format_version == REVLOGV0:
3942 if self._format_version == REVLOGV0:
3826 raise error.RevlogError(
3943 raise error.RevlogError(
3827 _(b'cannot censor with version %d revlogs')
3944 _(b'cannot censor with version %d revlogs')
3828 % self._format_version
3945 % self._format_version
3829 )
3946 )
3830 elif self._format_version == REVLOGV1:
3947 elif self._format_version == REVLOGV1:
3831 rewrite.v1_censor(self, tr, censornode, tombstone)
3948 rewrite.v1_censor(self, tr, censornode, tombstone)
3832 else:
3949 else:
3833 rewrite.v2_censor(self, tr, censornode, tombstone)
3950 rewrite.v2_censor(self, tr, censornode, tombstone)
3834
3951
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

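    # Illustrative usage (not part of this module): the verifier is a
    # generator, so a caller drains it and reports each problem, e.g. with
    # an assumed revlog instance `rl`::
    #
    #     state = {
    #         b'expectedversion': rl._format_version,
    #         b'erroroncensored': True,
    #     }
    #     for problem in rl.verifyintegrity(state):
    #         print(problem.error or problem.warning)
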
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

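    # Illustrative usage (assumed revlog instance `rl`): callers request
    # only the fields they need, and the keys of the returned dict are
    # bytes::
    #
    #     info = rl.storageinfo(revisionscount=True, trackedsize=True)
    #     total = info[b'trackedsize']
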
    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.feature_config.has_side_data:
            return
        # revlog formats with sidedata support do not support inline data
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh, sdfh = self._inner._writinghandles
            dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

            current_offset = sdfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.feature_config.has_side_data:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self._inner.compress(serialized_sidedata)
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around
                sdfh.seek(current_offset, os.SEEK_SET)
                sdfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
            self._docket.sidedata_end = sdfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
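
The sidedata compression-mode selection above condenses to a small decision
function. A minimal, illustrative paraphrase (the mode names stand in for
Mercurial's COMP_MODE_* constants, and byte comparisons are written with
slices for clarity):

    def pick_sidedata_mode(raw, compress, default_header):
        """Sketch of the decision above: PLAIN when compression does not
        pay off, DEFAULT when the compressed header matches the docket's
        default engine (so it need not be stored), INLINE otherwise."""
        header, comp = compress(raw)
        if header == b'u' or comp[:1] == b'\0' or len(comp) >= len(raw):
            return 'PLAIN', raw  # store uncompressed
        if comp[:1] == default_header:
            return 'DEFAULT', comp  # engine implied by the docket
        return 'INLINE', comp  # engine tagged inline in the data
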
@@ -1,234 +1,241 @@
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import contextlib

from ..i18n import _
from .. import (
    error,
    util,
)


_MAX_CACHED_CHUNK_SIZE = 1048576  # 1 MiB

PARTIAL_READ_MSG = _(
    b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
)


def _is_power_of_two(n):
    return (n & (n - 1) == 0) and n != 0


class appender:
    """the changelog index must be updated last on disk, so we use this class
    to delay writes to it"""

    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)

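# Illustrative usage (a vfs-like object `vfs` and a shared list `buf` are
# assumed): an `appender` behaves as one virtual file whose committed
# prefix lives on disk and whose pending tail lives in `buf`::
#
#     buf = []
#     a = appender(vfs, b'00changelog.i', b'a+b', buf)
#     a.write(b'pending')      # appended to `buf`, nothing hits the disk
#     a.seek(0)
#     data = a.read(a.end())   # the read spans the real file plus `buf`
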
class randomaccessfile:
    """Accessing arbitrary chunks of data within a file, with some caching"""

    def __init__(
        self,
        opener,
        filename,
        default_cached_chunk_size,
        initial_cache=None,
    ):
        # Required by bitwise manipulation below
        assert _is_power_of_two(default_cached_chunk_size)

        self.opener = opener
        self.filename = filename
        self.default_cached_chunk_size = default_cached_chunk_size
        self.writing_handle = None  # This is set from revlog.py
        self.reading_handle = None
        self._cached_chunk = b''
        self._cached_chunk_position = 0  # Offset from the start of the file
        if initial_cache:
            self._cached_chunk_position, self._cached_chunk = initial_cache

        self._delay_buffer = None

    def clear_cache(self):
        self._cached_chunk = b''
        self._cached_chunk_position = 0

    @property
    def is_open(self):
        """True if any file handle is being held

        Used for assert and debug in the python code"""
        return (
            self.reading_handle is not None or self.writing_handle is not None
        )

    def _open(self, mode=b'r'):
        """Return a file object"""
        if self._delay_buffer is None:
            return self.opener(self.filename, mode=mode)
        else:
            return appender(
                self.opener, self.filename, mode, self._delay_buffer
            )

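    # Illustrative usage (hypothetical `raf` instance): assigning a list to
    # `_delay_buffer` makes `_open` hand back an `appender`, so subsequent
    # writes are diverted to memory instead of the file::
    #
    #     raf._delay_buffer = []         # arm the delay; None writes through
    #     with raf._open(b'a+b') as fp:  # an `appender`, not a real file
    #         fp.write(b'pending entry')
    #     assert raf._delay_buffer == [b'pending entry']
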
    @contextlib.contextmanager
    def _read_handle(self):
        """File object suitable for reading data"""
        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, revlog._writeentry performs a SEEK_END before all
        # writes, so we should be safe.
        if self.writing_handle:
            yield self.writing_handle

        elif self.reading_handle:
            yield self.reading_handle

        # Otherwise open a new file handle.
        else:
            with self._open() as fp:
                yield fp

    @contextlib.contextmanager
    def reading(self):
        """Context manager that keeps the file open for reading"""
        if (
            self.reading_handle is None
            and self.writing_handle is None
            and self.filename is not None
        ):
            with self._open() as fp:
                self.reading_handle = fp
                try:
                    yield
                finally:
                    self.reading_handle = None
        else:
            yield

    def read_chunk(self, offset, length):
        """Read a chunk of bytes from the file.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        end = offset + length
        cache_start = self._cached_chunk_position
        cache_end = cache_start + len(self._cached_chunk)
        # Is the requested chunk within the cache?
        if cache_start <= offset and end <= cache_end:
            if cache_start == offset and end == cache_end:
                return self._cached_chunk  # avoid a copy
            relative_start = offset - cache_start
            return util.buffer(self._cached_chunk, relative_start, length)

        return self._read_and_update_cache(offset, length)

    def _read_and_update_cache(self, offset, length):
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        real_offset = offset & ~(self.default_cached_chunk_size - 1)
        real_length = (
            (offset + length + self.default_cached_chunk_size)
            & ~(self.default_cached_chunk_size - 1)
        ) - real_offset
        with self._read_handle() as file_obj:
            file_obj.seek(real_offset)
            data = file_obj.read(real_length)

        self._add_cached_chunk(real_offset, data)

        relative_offset = offset - real_offset
        got = len(data) - relative_offset
        if got < length:
            message = PARTIAL_READ_MSG % (self.filename, length, offset, got)
            raise error.RevlogError(message)

        if offset != real_offset or real_length != length:
            return util.buffer(data, relative_offset, length)
        return data

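    # Worked example of the alignment arithmetic above, assuming a
    # 65536-byte (2**16) default cached chunk size: for offset=70000 and
    # length=100,
    #
    #     real_offset = 70000 & ~65535                     -> 65536
    #     real_length = ((70100 + 65536) & ~65535) - 65536 -> 65536
    #
    # so the cache window covers [65536, 131072), a superset of the
    # requested range [70000, 70100).
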
    def _add_cached_chunk(self, offset, data):
        """Add to or replace the cached data chunk.

        Accepts an absolute offset and the data that is at that location.
        """
        if (
            self._cached_chunk_position + len(self._cached_chunk) == offset
            and len(self._cached_chunk) + len(data) < _MAX_CACHED_CHUNK_SIZE
        ):
            # add to existing cache
            self._cached_chunk += data
        else:
            self._cached_chunk = data
            self._cached_chunk_position = offset
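
The cache-extension policy in `_add_cached_chunk` is easiest to see in
isolation. A minimal sketch using the class above; `None` stands in for the
opener, since these calls never touch the file:

    # contiguous data extends the cached chunk, anything else replaces it
    raf = randomaccessfile(None, b'data', 16)  # 16: a power of two
    raf._add_cached_chunk(0, b'A' * 16)
    raf._add_cached_chunk(16, b'B' * 16)   # contiguous: appended
    assert raf._cached_chunk == b'A' * 16 + b'B' * 16
    raf._add_cached_chunk(64, b'C' * 16)   # non-contiguous: cache replaced
    assert (raf._cached_chunk_position, raf._cached_chunk) == (64, b'C' * 16)
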
@@ -1,1136 +1,1134 b''
1 Test exchange of common information using bundle2
1 Test exchange of common information using bundle2
2
2
3
3
4 $ getmainid() {
4 $ getmainid() {
5 > hg -R main log --template '{node}\n' --rev "$1"
5 > hg -R main log --template '{node}\n' --rev "$1"
6 > }
6 > }
7
7
8 enable obsolescence
8 enable obsolescence
9
9
10 $ cp $HGRCPATH $TESTTMP/hgrc.orig
10 $ cp $HGRCPATH $TESTTMP/hgrc.orig
11 $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
11 $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
12 > echo pushkey: lock state after \"\$HG_NAMESPACE\"
12 > echo pushkey: lock state after \"\$HG_NAMESPACE\"
13 > hg debuglock
13 > hg debuglock
14 > EOF
14 > EOF
15
15
16 $ cat >> $HGRCPATH << EOF
16 $ cat >> $HGRCPATH << EOF
17 > [experimental]
17 > [experimental]
18 > evolution.createmarkers=True
18 > evolution.createmarkers=True
19 > evolution.exchange=True
19 > evolution.exchange=True
20 > bundle2-output-capture=True
20 > bundle2-output-capture=True
21 > [command-templates]
21 > [command-templates]
22 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
22 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
23 > [web]
23 > [web]
24 > push_ssl = false
24 > push_ssl = false
25 > allow_push = *
25 > allow_push = *
26 > [phases]
26 > [phases]
27 > publish=False
27 > publish=False
28 > [hooks]
28 > [hooks]
29 > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
29 > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
30 > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
30 > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
31 > txnclose.env = sh -c "HG_LOCAL= printenv.py txnclose"
31 > txnclose.env = sh -c "HG_LOCAL= printenv.py txnclose"
32 > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
32 > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
33 > EOF
33 > EOF
34
34
35 The extension requires a repo (currently unused)
35 The extension requires a repo (currently unused)
36
36
37 $ hg init main
37 $ hg init main
38 $ cd main
38 $ cd main
39 $ touch a
39 $ touch a
40 $ hg add a
40 $ hg add a
41 $ hg commit -m 'a'
41 $ hg commit -m 'a'
42 pre-close-tip:3903775176ed draft
42 pre-close-tip:3903775176ed draft
43 postclose-tip:3903775176ed draft
43 postclose-tip:3903775176ed draft
44 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
44 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
45
45
46 $ hg unbundle $TESTDIR/bundles/rebase.hg
46 $ hg unbundle $TESTDIR/bundles/rebase.hg
47 adding changesets
47 adding changesets
48 adding manifests
48 adding manifests
49 adding file changes
49 adding file changes
50 pre-close-tip:02de42196ebe draft
50 pre-close-tip:02de42196ebe draft
51 added 8 changesets with 7 changes to 7 files (+3 heads)
51 added 8 changesets with 7 changes to 7 files (+3 heads)
52 new changesets cd010b8cd998:02de42196ebe (8 drafts)
52 new changesets cd010b8cd998:02de42196ebe (8 drafts)
53 postclose-tip:02de42196ebe draft
53 postclose-tip:02de42196ebe draft
54 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle
54 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle
55 bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
55 bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
56 (run 'hg heads' to see heads, 'hg merge' to merge)
56 (run 'hg heads' to see heads, 'hg merge' to merge)
57
57
58 $ cd ..
58 $ cd ..
59
59
60 Real world exchange
60 Real world exchange
61 =====================
61 =====================
62
62
63 Add more obsolescence information
63 Add more obsolescence information
64
64
65 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
65 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
66 pre-close-tip:02de42196ebe draft
66 pre-close-tip:02de42196ebe draft
67 1 new obsolescence markers
67 1 new obsolescence markers
68 postclose-tip:02de42196ebe draft
68 postclose-tip:02de42196ebe draft
69 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
69 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
70 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
70 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
71 pre-close-tip:02de42196ebe draft
71 pre-close-tip:02de42196ebe draft
72 1 new obsolescence markers
72 1 new obsolescence markers
73 postclose-tip:02de42196ebe draft
73 postclose-tip:02de42196ebe draft
74 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
74 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
75
75
76 clone --pull
76 clone --pull
77
77
78 $ hg -R main phase --public cd010b8cd998
78 $ hg -R main phase --public cd010b8cd998
79 pre-close-tip:02de42196ebe draft
79 pre-close-tip:02de42196ebe draft
80 postclose-tip:02de42196ebe draft
80 postclose-tip:02de42196ebe draft
81 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
81 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
82 $ hg clone main other --pull --rev 9520eea781bc
82 $ hg clone main other --pull --rev 9520eea781bc
83 adding changesets
83 adding changesets
84 adding manifests
84 adding manifests
85 adding file changes
85 adding file changes
86 pre-close-tip:9520eea781bc draft
86 pre-close-tip:9520eea781bc draft
87 added 2 changesets with 2 changes to 2 files
87 added 2 changesets with 2 changes to 2 files
88 1 new obsolescence markers
88 1 new obsolescence markers
89 new changesets cd010b8cd998:9520eea781bc (1 drafts)
89 new changesets cd010b8cd998:9520eea781bc (1 drafts)
90 postclose-tip:9520eea781bc draft
90 postclose-tip:9520eea781bc draft
91 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
91 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
92 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
92 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
93 updating to branch default
93 updating to branch default
94 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 $ hg -R other log -G
95 $ hg -R other log -G
96 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
96 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
97 |
97 |
98 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
98 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
99
99
100 $ hg -R other debugobsolete
100 $ hg -R other debugobsolete
101 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
101 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
102
102
103 pull
103 pull
104
104
105 $ hg -R main phase --public 9520eea781bc
105 $ hg -R main phase --public 9520eea781bc
106 pre-close-tip:02de42196ebe draft
106 pre-close-tip:02de42196ebe draft
107 postclose-tip:02de42196ebe draft
107 postclose-tip:02de42196ebe draft
108 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
108 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
109 $ hg -R other pull -r 24b6387c8c8c
109 $ hg -R other pull -r 24b6387c8c8c
110 pulling from $TESTTMP/main
110 pulling from $TESTTMP/main
111 searching for changes
111 searching for changes
112 adding changesets
112 adding changesets
113 adding manifests
113 adding manifests
114 adding file changes
114 adding file changes
115 pre-close-tip:24b6387c8c8c draft
115 pre-close-tip:24b6387c8c8c draft
116 added 1 changesets with 1 changes to 1 files (+1 heads)
116 added 1 changesets with 1 changes to 1 files (+1 heads)
117 1 new obsolescence markers
117 1 new obsolescence markers
118 new changesets 24b6387c8c8c (1 drafts)
118 new changesets 24b6387c8c8c (1 drafts)
119 postclose-tip:24b6387c8c8c draft
119 postclose-tip:24b6387c8c8c draft
120 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
120 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
121 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
121 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
122 (run 'hg heads' to see heads, 'hg merge' to merge)
122 (run 'hg heads' to see heads, 'hg merge' to merge)
123 $ hg -R other log -G
123 $ hg -R other log -G
124 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
124 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
125 |
125 |
126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
127 |/
127 |/
128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
129
129
130 $ hg -R other debugobsolete
130 $ hg -R other debugobsolete
131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
133
133
134 pull empty (with phase movement)
134 pull empty (with phase movement)
135
135
136 $ hg -R main phase --public 24b6387c8c8c
136 $ hg -R main phase --public 24b6387c8c8c
137 pre-close-tip:02de42196ebe draft
137 pre-close-tip:02de42196ebe draft
138 postclose-tip:02de42196ebe draft
138 postclose-tip:02de42196ebe draft
139 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
139 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
140 $ hg -R other pull -r 24b6387c8c8c
140 $ hg -R other pull -r 24b6387c8c8c
141 pulling from $TESTTMP/main
141 pulling from $TESTTMP/main
142 no changes found
142 no changes found
143 pre-close-tip:24b6387c8c8c public
143 pre-close-tip:24b6387c8c8c public
144 1 local changesets published
144 1 local changesets published
145 postclose-tip:24b6387c8c8c public
145 postclose-tip:24b6387c8c8c public
146 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
146 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
147 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
147 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
148 $ hg -R other log -G
148 $ hg -R other log -G
149 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
149 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
150 |
150 |
151 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
151 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
152 |/
152 |/
153 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
153 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
154
154
155 $ hg -R other debugobsolete
155 $ hg -R other debugobsolete
156 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
156 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
157 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
157 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
158
158
159 pull empty
159 pull empty
160
160
161 $ hg -R other pull -r 24b6387c8c8c
161 $ hg -R other pull -r 24b6387c8c8c
162 pulling from $TESTTMP/main
162 pulling from $TESTTMP/main
163 no changes found
163 no changes found
164 pre-close-tip:24b6387c8c8c public
164 pre-close-tip:24b6387c8c8c public
165 postclose-tip:24b6387c8c8c public
165 postclose-tip:24b6387c8c8c public
166 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
166 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
167 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
167 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
168 $ hg -R other log -G
168 $ hg -R other log -G
169 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
169 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
170 |
170 |
171 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
171 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
172 |/
172 |/
173 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
173 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
174
174
175 $ hg -R other debugobsolete
175 $ hg -R other debugobsolete
176 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
176 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
177 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
177 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
178
178
179 add extra data to test their exchange during push
179 add extra data to test their exchange during push
180
180
181 $ hg -R main bookmark --rev eea13746799a book_eea1
181 $ hg -R main bookmark --rev eea13746799a book_eea1
182 pre-close-tip:02de42196ebe draft
182 pre-close-tip:02de42196ebe draft
183 postclose-tip:02de42196ebe draft
183 postclose-tip:02de42196ebe draft
184 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
184 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
185 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
185 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
186 pre-close-tip:02de42196ebe draft
186 pre-close-tip:02de42196ebe draft
187 1 new obsolescence markers
187 1 new obsolescence markers
188 postclose-tip:02de42196ebe draft
188 postclose-tip:02de42196ebe draft
189 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
189 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
190 $ hg -R main bookmark --rev 02de42196ebe book_02de
190 $ hg -R main bookmark --rev 02de42196ebe book_02de
191 pre-close-tip:02de42196ebe draft book_02de
191 pre-close-tip:02de42196ebe draft book_02de
192 postclose-tip:02de42196ebe draft book_02de
192 postclose-tip:02de42196ebe draft book_02de
193 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
193 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
194 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
194 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
195 pre-close-tip:02de42196ebe draft book_02de
195 pre-close-tip:02de42196ebe draft book_02de
196 1 new obsolescence markers
196 1 new obsolescence markers
197 postclose-tip:02de42196ebe draft book_02de
197 postclose-tip:02de42196ebe draft book_02de
198 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
198 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
199 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
199 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
200 pre-close-tip:02de42196ebe draft book_02de
200 pre-close-tip:02de42196ebe draft book_02de
201 postclose-tip:02de42196ebe draft book_02de
201 postclose-tip:02de42196ebe draft book_02de
202 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
202 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
203 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
203 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
204 pre-close-tip:02de42196ebe draft book_02de
204 pre-close-tip:02de42196ebe draft book_02de
205 1 new obsolescence markers
205 1 new obsolescence markers
206 postclose-tip:02de42196ebe draft book_02de
206 postclose-tip:02de42196ebe draft book_02de
207 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
207 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
208 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
208 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
209 pre-close-tip:02de42196ebe draft book_02de
209 pre-close-tip:02de42196ebe draft book_02de
210 postclose-tip:02de42196ebe draft book_02de
210 postclose-tip:02de42196ebe draft book_02de
211 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
211 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
212 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
212 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
213 pre-close-tip:02de42196ebe draft book_02de
213 pre-close-tip:02de42196ebe draft book_02de
214 1 new obsolescence markers
214 1 new obsolescence markers
215 postclose-tip:02de42196ebe draft book_02de
215 postclose-tip:02de42196ebe draft book_02de
216 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
216 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
217 $ hg -R main bookmark --rev 32af7686d403 book_32af
217 $ hg -R main bookmark --rev 32af7686d403 book_32af
218 pre-close-tip:02de42196ebe draft book_02de
218 pre-close-tip:02de42196ebe draft book_02de
219 postclose-tip:02de42196ebe draft book_02de
219 postclose-tip:02de42196ebe draft book_02de
220 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
220 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
221 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
221 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
222 pre-close-tip:02de42196ebe draft book_02de
222 pre-close-tip:02de42196ebe draft book_02de
223 1 new obsolescence markers
223 1 new obsolescence markers
224 postclose-tip:02de42196ebe draft book_02de
224 postclose-tip:02de42196ebe draft book_02de
225 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
225 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
226
226
227 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
227 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
228 pre-close-tip:24b6387c8c8c public
228 pre-close-tip:24b6387c8c8c public
229 postclose-tip:24b6387c8c8c public
229 postclose-tip:24b6387c8c8c public
230 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
230 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
231 $ hg -R other bookmark --rev cd010b8cd998 book_02de
231 $ hg -R other bookmark --rev cd010b8cd998 book_02de
232 pre-close-tip:24b6387c8c8c public
232 pre-close-tip:24b6387c8c8c public
233 postclose-tip:24b6387c8c8c public
233 postclose-tip:24b6387c8c8c public
234 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
234 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
235 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
235 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
236 pre-close-tip:24b6387c8c8c public
236 pre-close-tip:24b6387c8c8c public
237 postclose-tip:24b6387c8c8c public
237 postclose-tip:24b6387c8c8c public
238 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
238 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
239 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
239 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
240 pre-close-tip:24b6387c8c8c public
240 pre-close-tip:24b6387c8c8c public
241 postclose-tip:24b6387c8c8c public
241 postclose-tip:24b6387c8c8c public
242 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
242 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
243 $ hg -R other bookmark --rev cd010b8cd998 book_32af
243 $ hg -R other bookmark --rev cd010b8cd998 book_32af
244 pre-close-tip:24b6387c8c8c public
244 pre-close-tip:24b6387c8c8c public
245 postclose-tip:24b6387c8c8c public
245 postclose-tip:24b6387c8c8c public
246 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
246 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
247
247
248 $ hg -R main phase --public eea13746799a
248 $ hg -R main phase --public eea13746799a
249 pre-close-tip:02de42196ebe draft book_02de
249 pre-close-tip:02de42196ebe draft book_02de
250 postclose-tip:02de42196ebe draft book_02de
250 postclose-tip:02de42196ebe draft book_02de
251 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
251 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
252
252
253 push
253 push
254 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
254 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
255 pushing to other
255 pushing to other
256 searching for changes
256 searching for changes
257 remote: adding changesets
257 remote: adding changesets
258 remote: adding manifests
258 remote: adding manifests
259 remote: adding file changes
259 remote: adding file changes
260 remote: pre-close-tip:eea13746799a public book_eea1
260 remote: pre-close-tip:eea13746799a public book_eea1
261 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
261 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
262 remote: 1 new obsolescence markers
262 remote: 1 new obsolescence markers
263 remote: pushkey: lock state after "bookmarks"
263 remote: pushkey: lock state after "bookmarks"
264 remote: lock: free
264 remote: lock: free
265 remote: wlock: free
265 remote: wlock: free
266 remote: postclose-tip:eea13746799a public book_eea1
266 remote: postclose-tip:eea13746799a public book_eea1
267 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_NODE_LAST=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_TXNNAME=push HG_URL=file:$TESTTMP/other
267 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_NODE_LAST=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_TXNNAME=push HG_URL=file:$TESTTMP/other
268 updating bookmark book_eea1
268 updating bookmark book_eea1
269 pre-close-tip:02de42196ebe draft book_02de
269 pre-close-tip:02de42196ebe draft book_02de
270 postclose-tip:02de42196ebe draft book_02de
270 postclose-tip:02de42196ebe draft book_02de
271 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
271 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
272 file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
272 file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
273 $ hg -R other log -G
273 $ hg -R other log -G
274 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
274 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
275 |\
275 |\
276 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
276 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
277 | |
277 | |
278 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
278 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
279 |/
279 |/
280 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
280 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
281
281
282 $ hg -R other debugobsolete
282 $ hg -R other debugobsolete
283 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
283 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
284 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
284 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
285 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
285 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
286
286
287 pull over ssh
287 pull over ssh
288
288
  $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
  pulling from ssh://user@dummy/main
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  updating bookmark book_02de
  pre-close-tip:02de42196ebe draft book_02de
  added 1 changesets with 1 changes to 1 files (+1 heads)
  1 new obsolescence markers
  new changesets 02de42196ebe (1 drafts)
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
  ssh://user@dummy/main HG_URL=ssh://user@dummy/main
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

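Each line of `debugobsolete` output is one obsolescence marker: the obsoleted
(precursor) node, its successor node, a flags field (`0` here), the marker's
date, and its metadata dict. The all-ones, all-twos, etc. precursors are
synthetic nodes that only exist inside the markers; as a rough illustration
(not the literal command run earlier in this test), such a marker can be
recorded by hand:

    $ hg debugobsolete 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba
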
pull over http

  $ hg serve -R main -p $HGPORT -d --pid-file=main.pid -E main-error.log
  $ cat main.pid >> $DAEMON_PIDS

  $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
  pulling from http://localhost:$HGPORT/
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  updating bookmark book_42cc
  pre-close-tip:42ccdea3bb16 draft book_42cc
  added 1 changesets with 1 changes to 1 files (+1 heads)
  1 new obsolescence markers
  new changesets 42ccdea3bb16 (1 drafts)
  postclose-tip:42ccdea3bb16 draft book_42cc
  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
  http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
  (run 'hg heads .' to see heads, 'hg merge' to merge)
  $ cat main-error.log
  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

push over ssh

  $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: pre-close-tip:5fddd98957c8 draft book_5fdd
  remote: added 1 changesets with 1 changes to 1 files
  remote: 1 new obsolescence markers
  remote: pushkey: lock state after "bookmarks"
  remote: lock: free
  remote: wlock: free
  remote: postclose-tip:5fddd98957c8 draft book_5fdd
  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_TXNNAME=serve HG_URL=remote:ssh:$LOCALIP
  updating bookmark book_5fdd
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
  ssh://user@dummy/other HG_URL=ssh://user@dummy/other
  $ hg -R other log -G
  o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
  |
  o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
  |
  | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
  | |
  | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
  | |/|
  | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |/ /
  | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

push over http

  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main phase --public 32af7686d403
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
  $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: pre-close-tip:32af7686d403 public book_32af
  remote: added 1 changesets with 1 changes to 1 files
  remote: 1 new obsolescence markers
  remote: pushkey: lock state after "bookmarks"
  remote: lock: free
  remote: wlock: free
  remote: postclose-tip:32af7686d403 public book_32af
  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_TXNNAME=serve HG_URL=remote:http:$LOCALIP: (glob)
  updating bookmark book_32af
  pre-close-tip:02de42196ebe draft book_02de
  postclose-tip:02de42196ebe draft book_02de
  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
  http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
  $ cat other-error.log

Check final content.

  $ hg -R other log -G
  o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
  |
  o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
  |
  o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
  |
  | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
  | |
  | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
  | |/|
  | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
  |/ /
  | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
  |/
  o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A

  $ hg -R other debugobsolete
  1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

(check that no 'pending' files remain)

  $ ls -1 other/.hg/bookmarks*
  other/.hg/bookmarks
  $ ls -1 other/.hg/store/phaseroots*
  other/.hg/store/phaseroots
  $ ls -1 other/.hg/store/00changelog.i*
  other/.hg/store/00changelog.i

Error Handling
==============

Check that errors are properly returned to the client during push.

Setting up

  $ cat > failpush.py << EOF
  > """A small extension that makes push fail when using bundle2
  >
  > used to test error handling in bundle2
  > """
  >
  > from mercurial import error
  > from mercurial import bundle2
  > from mercurial import exchange
  > from mercurial import extensions
  > from mercurial import registrar
  > cmdtable = {}
  > command = registrar.command(cmdtable)
  >
  > configtable = {}
  > configitem = registrar.configitem(configtable)
  > configitem(b'failpush', b'reason',
  >     default=None,
  > )
  >
  > def _pushbundle2failpart(pushop, bundler):
  >     reason = pushop.ui.config(b'failpush', b'reason')
  >     part = None
  >     if reason == b'abort':
  >         bundler.newpart(b'test:abort')
  >     if reason == b'unknown':
  >         bundler.newpart(b'test:unknown')
  >     if reason == b'race':
  >         # 20 Bytes of crap
  >         bundler.newpart(b'check:heads', data=b'01234567890123456789')
  >
  > @bundle2.parthandler(b"test:abort")
  > def handleabort(op, part):
  >     raise error.Abort(b'Abandon ship!', hint=b"don't panic")
  >
  > def uisetup(ui):
  >     exchange.b2partsgenmapping[b'failpart'] = _pushbundle2failpart
  >     exchange.b2partsgenorder.insert(0, b'failpart')
  >
  > EOF
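
Note how `uisetup` wires the generator in by mutating `exchange.b2partsgenmapping`
and inserting `failpart` at position 0 of `exchange.b2partsgenorder`, presumably
so the failing part is generated before any other part of the push bundle. The
`mandatorypart.py` extension later in this test registers with the
`exchange.b2partsgenerator` decorator instead, which records the mapping and
appends to the order in one step; a minimal sketch of that style, reusing the
config knob defined above:

    # sketch only: decorator-based registration of the same generator;
    # unlike the insert(0) above, the decorator appends to the generation order
    from mercurial import exchange

    @exchange.b2partsgenerator(b'failpart')
    def _pushbundle2failpart(pushop, bundler):
        if pushop.ui.config(b'failpush', b'reason') == b'abort':
            bundler.newpart(b'test:abort')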

  $ cd main
  $ hg up tip
  3 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo 'I' > I
  $ hg add I
  $ hg ci -m 'I'
  pre-close-tip:e7ec4e813ba6 draft
  postclose-tip:e7ec4e813ba6 draft
  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
  $ hg id
  e7ec4e813ba6 tip
  $ cd ..

  $ cat << EOF >> $HGRCPATH
  > [extensions]
  > failpush=$TESTTMP/failpush.py
  > EOF

  $ killdaemons.py
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

Doing the actual push: Abort error

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = abort
  > EOF

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: Abandon ship!
  remote: (don't panic)
  abort: push failed on remote
  [100]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: Abandon ship!
  remote: (don't panic)
  abort: push failed on remote
  [100]


Doing the actual push: unknown mandatory parts

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = unknown
  > EOF

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: missing support for test:unknown
  [100]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: missing support for test:unknown
  [100]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: missing support for test:unknown
  [100]

Doing the actual push: race

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = race
  > EOF

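`check:heads` is the bundle2 part normally used to detect push races: its
payload is the concatenation of the 20-byte binary nodes the client believes
are the repository heads, and the server compares that list against its actual
heads before applying anything, failing with a "repository changed" error on
mismatch. Feeding it 20 bytes of garbage above therefore guarantees the race
path triggers. A hedged sketch of how a well-formed part would be built (the
names here are assumptions, not code from this test):

    # sketch: a genuine check:heads payload is just the binary node ids
    # of the heads the client last observed on the remote
    remoteheads = pushop.remote.heads()  # list of 20-byte nodes
    bundler.newpart(b'check:heads', data=b''.join(remoteheads))
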
  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: push failed:
  'remote repository changed while pushing - please try again'
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: push failed:
  'remote repository changed while pushing - please try again'
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: push failed:
  'remote repository changed while pushing - please try again'
  [255]

Doing the actual push: hook abort

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason =
  > [hooks]
  > pretxnclose.failpush = sh -c "echo 'You shall not pass!'; false"
  > txnabort.failpush = sh -c "echo 'Cleaning up the mess...'"
  > EOF

  $ killdaemons.py
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: pre-close-tip:e7ec4e813ba6 draft
  remote: You shall not pass!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  abort: pretxnclose.failpush hook exited with status 1
  [40]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: pre-close-tip:e7ec4e813ba6 draft
  remote: You shall not pass!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pretxnclose.failpush hook exited with status 1
  abort: push failed on remote
  [100]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: pre-close-tip:e7ec4e813ba6 draft
  remote: You shall not pass!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pretxnclose.failpush hook exited with status 1
  abort: push failed on remote
  [100]

(check that no 'pending' files remain)

  $ ls -1 other/.hg/bookmarks*
  other/.hg/bookmarks
  $ ls -1 other/.hg/store/phaseroots*
  other/.hg/store/phaseroots
  $ ls -1 other/.hg/store/00changelog.i*
  other/.hg/store/00changelog.i

Check error from hook during the unbundling process itself

  $ cat << EOF >> $HGRCPATH
  > pretxnchangegroup = sh -c "echo 'Fail early!'; false"
  > EOF
  $ killdaemons.py # reload http config
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: Fail early!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  abort: pretxnchangegroup hook exited with status 1
  [40]
  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: Fail early!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pretxnchangegroup hook exited with status 1
  abort: push failed on remote
  [100]
  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: Fail early!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pretxnchangegroup hook exited with status 1
  abort: push failed on remote
  [100]

Check output capture control.

(should still be forced for http, disabled for local and ssh)

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > bundle2-output-capture=False
  > EOF

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  Fail early!
  transaction abort!
  Cleaning up the mess...
  rollback completed
  abort: pretxnchangegroup hook exited with status 1
  [40]
  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: Fail early!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pretxnchangegroup hook exited with status 1
  abort: push failed on remote
  [100]
  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: Fail early!
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pretxnchangegroup hook exited with status 1
  abort: push failed on remote
  [100]

Check abort from mandatory pushkey

  $ cat > mandatorypart.py << EOF
  > from mercurial import exchange
  > from mercurial import pushkey
  > from mercurial import node
  > from mercurial import error
  > @exchange.b2partsgenerator(b'failingpuskey')
  > def addfailingpushey(pushop, bundler):
  >     enc = pushkey.encode
  >     part = bundler.newpart(b'pushkey')
  >     part.addparam(b'namespace', enc(b'phases'))
  >     part.addparam(b'key', enc(b'cd010b8cd998f3981a5a8115f94f8da4ab506089'))
  >     part.addparam(b'old', enc(b'0')) # successful update
  >     part.addparam(b'new', enc(b'0'))
  >     def fail(pushop, exc):
  >         raise error.Abort(b'Correct phase push failed (because hooks)')
  >     pushop.pkfailcb[part.id] = fail
  > EOF
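
The part above drives the `phases` pushkey namespace, where keys are full hex
nodes and the values are phase numbers (`0` public, `1` draft, `2` secret);
`old=0, new=0` against the root changeset is a no-op the pushkey layer accepts,
so only the `prepushkey` hook configured below can make it fail, and the
`pkfailcb` callback turns that pushkey failure into a hard abort. As a sketch
of the equivalent server-side operation (assuming a `repo` object in hand, not
code taken from this test):

    # sketch: what the pushkey part ultimately asks the server to perform
    from mercurial import pushkey
    ok = pushkey.push(repo, b'phases',
                      b'cd010b8cd998f3981a5a8115f94f8da4ab506089',
                      b'0', b'0')  # old phase -> new phase, both public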
  $ cat >> $HGRCPATH << EOF
  > [hooks]
  > pretxnchangegroup=
  > pretxnclose.failpush=
  > prepushkey.failpush = sh -c "echo 'do not push the key !'; false"
  > [extensions]
  > mandatorypart=$TESTTMP/mandatorypart.py
  > EOF
  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

(Failure from a hook)

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  do not push the key !
  pushkey-abort: prepushkey.failpush hook exited with status 1
  transaction abort!
  Cleaning up the mess...
  rollback completed
  abort: Correct phase push failed (because hooks)
  [255]
  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: do not push the key !
  remote: pushkey-abort: prepushkey.failpush hook exited with status 1
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  abort: Correct phase push failed (because hooks)
  [255]
  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: do not push the key !
  remote: pushkey-abort: prepushkey.failpush hook exited with status 1
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  abort: Correct phase push failed (because hooks)
  [255]

(Failure from the pushkey)

  $ cat > mandatorypart.py << EOF
  > from mercurial import exchange
  > from mercurial import pushkey
  > from mercurial import node
  > from mercurial import error
  > @exchange.b2partsgenerator(b'failingpuskey')
  > def addfailingpushey(pushop, bundler):
  >     enc = pushkey.encode
  >     part = bundler.newpart(b'pushkey')
  >     part.addparam(b'namespace', enc(b'phases'))
  >     part.addparam(b'key', enc(b'cd010b8cd998f3981a5a8115f94f8da4ab506089'))
  >     part.addparam(b'old', enc(b'4')) # will fail
  >     part.addparam(b'new', enc(b'3'))
  >     def fail(pushop, exc):
  >         raise error.Abort(b'Clown phase push failed')
  >     pushop.pkfailcb[part.id] = fail
  > EOF
  $ cat >> $HGRCPATH << EOF
  > [hooks]
  > prepushkey.failpush =
  > EOF
  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
  $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  transaction abort!
  Cleaning up the mess...
  rollback completed
  pushkey: lock state after "phases"
  lock: free
  wlock: free
  abort: Clown phase push failed
  [255]
  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pushkey: lock state after "phases"
  remote: lock: free
  remote: wlock: free
  abort: Clown phase push failed
  [255]
  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: transaction abort!
  remote: Cleaning up the mess...
  remote: rollback completed
  remote: pushkey: lock state after "phases"
  remote: lock: free
  remote: wlock: free
  abort: Clown phase push failed
  [255]

Test lazily acquiring the lock during unbundle
  $ cp $TESTTMP/hgrc.orig $HGRCPATH

  $ cat >> $TESTTMP/locktester.py <<EOF
  > import os
  > from mercurial import bundle2, error, extensions
  > def checklock(orig, repo, *args, **kwargs):
  >     if repo.svfs.lexists(b"lock"):
  >         raise error.Abort(b"Lock should not be taken")
  >     return orig(repo, *args, **kwargs)
  > def extsetup(ui):
  >     extensions.wrapfunction(bundle2, 'processbundle', checklock)
  > EOF
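
`extensions.wrapfunction(container, name, wrapper)` replaces `container.name`
and passes the original callable to the wrapper as its first argument, which is
how `checklock` can assert that the store lock file does not exist yet before
delegating to the real `bundle2.processbundle`. The general contract, as a
sketch with placeholder names:

    # sketch of the wrapfunction contract used by locktester.py above
    def wrapper(orig, *args, **kwargs):
        # ...pre-condition checks or instrumentation go here...
        return orig(*args, **kwargs)  # always delegate to the original

    extensions.wrapfunction(somemodule, 'somefunction', wrapper)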

  $ hg init lazylock
  $ cat >> lazylock/.hg/hgrc <<EOF
  > [extensions]
  > locktester=$TESTTMP/locktester.py
  > EOF

  $ hg clone -q ssh://user@dummy/lazylock lazylockclient
  $ cd lazylockclient
  $ touch a && hg ci -Aqm a
  $ hg push
  pushing to ssh://user@dummy/lazylock
  searching for changes
  remote: Lock should not be taken
  abort: push failed on remote
  [100]

  $ cat >> ../lazylock/.hg/hgrc <<EOF
  > [experimental]
  > bundle2lazylocking=True
  > EOF
  $ hg push
  pushing to ssh://user@dummy/lazylock
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files

  $ cd ..

Servers can disable bundle1 for clone/pull operations

  $ killdaemons.py
  $ hg init bundle2onlyserver
  $ cd bundle2onlyserver
  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1.pull = false
  > EOF

  $ touch foo
  $ hg -q commit -A -m initial

  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [100]
  $ killdaemons.py
  $ cd ..

bundle1 can still pull non-generaldelta repos when bundle1 is disabled for generaldelta repos

  $ hg --config format.usegeneraldelta=false init notgdserver
  $ cd notgdserver
  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd.pull = false
  > EOF

  $ touch foo
  $ hg -q commit -A -m initial
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-1
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ killdaemons.py
  $ cd ../bundle2onlyserver

bundle1 pull can be disabled for generaldelta repos only

  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd.pull = false
  > EOF

  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS
  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [100]

  $ killdaemons.py

Verify the global server.bundle1 option works

  $ cd ..
  $ cat > bundle2onlyserver/.hg/hgrc << EOF
  > [server]
  > bundle1 = false
  > EOF
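
For orientation, the `server.bundle1*` options exercised by this file form a
small hierarchy; to the best of my reading the server honors the most specific
option that is set (`bundle1gd.<action>`, then `bundle1.<action>`, then
`bundle1gd`, then `bundle1`), so a hedged summary of the knobs seen here is:

    [server]
    bundle1gd.pull = false  # bundle1 pulls, generaldelta repos only
    bundle1.pull = false    # bundle1 pulls/clones, any repo
    bundle1.push = false    # bundle1 pushes, any repo
    bundle1gd = false       # all bundle1 exchange, generaldelta repos only
    bundle1 = false         # all bundle1 exchange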
  $ hg serve -R bundle2onlyserver -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS
  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [100]
  $ killdaemons.py

  $ hg --config devel.legacy.exchange=bundle1 clone ssh://user@dummy/bundle2onlyserver not-bundle2-ssh
  requesting all changes
  adding changesets
  remote: abort: incompatible Mercurial client; bundle2 required
  remote: (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  abort: stream ended unexpectedly (got 0 bytes, expected 4)
  [255]

  $ cat > bundle2onlyserver/.hg/hgrc << EOF
  > [server]
  > bundle1gd = false
  > EOF
  $ hg serve -R bundle2onlyserver -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
  requesting all changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [100]

  $ killdaemons.py

  $ cd notgdserver
  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1gd = false
  > EOF
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-2
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ killdaemons.py
  $ cd ../bundle2onlyserver

Verify bundle1 pushes can be disabled

  $ cat > .hg/hgrc << EOF
  > [server]
  > bundle1.push = false
  > [web]
  > allow_push = *
  > push_ssl = false
  > EOF

  $ hg serve -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid >> $DAEMON_PIDS
  $ cd ..

  $ hg clone http://localhost:$HGPORT bundle2-only
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 96ee1d7354c4
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd bundle2-only
  $ echo commit > foo
  $ hg commit -m commit
  $ hg --config devel.legacy.exchange=bundle1 push
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: remote error:
  incompatible Mercurial client; bundle2 required
  (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [100]

(also check with ssh)

  $ hg --config devel.legacy.exchange=bundle1 push ssh://user@dummy/bundle2onlyserver
  pushing to ssh://user@dummy/bundle2onlyserver
  searching for changes
  remote: abort: incompatible Mercurial client; bundle2 required
  remote: (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
  [1]

  $ hg push
  pushing to http://localhost:$HGPORT/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
@@ -1,952 +1,946 b''
#require serve zstd

Client version is embedded in the HTTP request and is effectively dynamic.
Pin the version so behavior is deterministic.

  $ cat > fakeversion.py << EOF
  > from mercurial import util
  > util.version = lambda: b'4.2'
  > EOF

  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > fakeversion = `pwd`/fakeversion.py
  > [format]
  > sparse-revlog = no
  > use-persistent-nodemap = no
  > [devel]
  > legacy.exchange = phases
  > [server]
  > concurrent-push-mode = strict
  > EOF

  $ hg init server0
  $ cd server0
  $ touch foo
  $ hg -q commit -A -m initial

Also disable compression: zstd is optional and causes output to vary, and
debugging partial responses is hard when compression is involved.

  $ cat > .hg/hgrc << EOF
  > [extensions]
  > badserver = $TESTDIR/testlib/badserverext.py
  > [server]
  > compressionengines = none
  > EOF
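
(badserverext.py itself is not shown in this test. As a rough mental model
only, an illustrative Python sketch rather than the extension's actual code,
the recv/send knobs behave roughly like a socket wrapper with byte budgets:)

  # Illustrative sketch; the real logic lives in testlib/badserverext.py.
  class BudgetedSocket(object):
      """Wrap a socket; cut the connection once a byte budget is spent."""

      def __init__(self, sock, recv_budget=None, send_budget=None):
          self._sock = sock
          self._recv_budget = recv_budget  # cf. badserver.close-after-recv-bytes
          self._send_budget = send_budget  # cf. badserver.close-after-send-bytes

      def recv(self, size):
          if self._recv_budget is not None:
              size = min(size, self._recv_budget)
          data = self._sock.recv(size)
          if self._recv_budget is not None:
              self._recv_budget -= len(data)
              if self._recv_budget <= 0:
                  self._sock.close()  # "read limit reached; closing socket"
          return data

      def sendall(self, data):
          if self._send_budget is not None and len(data) > self._send_budget:
              self._sock.sendall(data[:self._send_budget])
              self._sock.close()  # "write limit reached; closing socket"
              raise Exception('connection closed after sending N bytes')
          self._sock.sendall(data)
          if self._send_budget is not None:
              self._send_budget -= len(data)

(The error.log lines below appear to follow the shape
`readline(<remaining budget> from <underlying limit>) -> (<bytes moved>) <data>`,
with `~` standing for "no limit".)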

Failure to accept() socket should result in connection-related error message
----------------------------------------------------------------------------

  $ hg serve --config badserver.close-before-accept=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: (\$ECONNRESET\$|\$EADDRNOTAVAIL\$) (re)
  [100]

(The server exits on its own, but there is a race between that and starting a
new server. So ensure the process is dead.)

  $ killdaemons.py $DAEMON_PIDS

Failure immediately after accept() should yield connection-related error message
--------------------------------------------------------------------------------

  $ hg serve --config badserver.close-after-accept=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS

TODO: this usually produces good results, but sometimes emits
abort: error: '' on FreeBSD and OS X.
What we ideally want is:

abort: error: $ECONNRESET$

The flakiness in this output was easily observable with
--runs-per-test=20 on macOS 10.12 during the freeze for 4.2.
  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: * (glob)
  [100]

  $ killdaemons.py $DAEMON_PIDS

Failure to read all bytes in initial HTTP request should yield connection-related error message
-----------------------------------------------------------------------------------------------

  $ hg serve --config badserver.close-after-recv-bytes=1 -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: bad HTTP status line: * (glob)
  [100]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log
  readline(1 from ~) -> (1) G
  read limit reached; closing socket

  $ rm -f error.log

Same failure, but server reads full HTTP request line
-----------------------------------------------------

  $ hg serve \
  > --config badserver.close-after-recv-patterns="GET /\?cmd=capabilities" \
  > --config badserver.close-after-recv-bytes=7 \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS
  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: bad HTTP status line: * (glob)
  [100]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(7 from *) -> (7) Accept- (glob)
  read limit reached; closing socket

  $ rm -f error.log

Failure on subsequent HTTP request on the same socket (cmd?batch)
-----------------------------------------------------------------

  $ hg serve \
  > --config badserver.close-after-recv-patterns="GET /\?cmd=batch,GET /\?cmd=batch" \
  > --config badserver.close-after-recv-bytes=15,197 \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS
  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: bad HTTP status line: * (glob)
  [100]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
  readline(*) -> (1?) Accept-Encoding* (glob)
  read limit reached; closing socket
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(4 from *) -> (4) host (glob)
  read limit reached; closing socket

  $ rm -f error.log
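
(Note how the comma-separated values above pair up: the first matched
`GET /?cmd=batch` request was cut 15 bytes after the match, the retried one
after 197. The two lists apparently act as a queue, one entry consumed per
matched pattern; a minimal sketch assuming exactly that queue behavior, with
illustrative names rather than badserverext.py internals:)

  import re

  # One pattern and one byte budget are consumed per match, in order.
  patterns = [rb'GET /\?cmd=batch', rb'GET /\?cmd=batch']
  budgets = [15, 197]

  def arm_next_budget(seen):
      """Return the next recv budget once the next pattern matches."""
      if patterns and re.search(patterns[0], seen):
          patterns.pop(0)
          return budgets.pop(0)
      return None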

Failure to read getbundle HTTP request
--------------------------------------

  $ hg serve \
  > --config badserver.close-after-recv-patterns="GET /\?cmd=batch,user-agent: mercurial/proto-1.0,GET /\?cmd=getbundle" \
  > --config badserver.close-after-recv-bytes=110,26,281 \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS
  $ hg clone http://localhost:$HGPORT/ clone
  requesting all changes
  abort: error: bad HTTP status line: * (glob)
  [100]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log
  readline(1 from -1) -> (1) x (?)
  readline(1 from -1) -> (1) x (?)
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (1?) x-hgproto-1:* (glob)
  read limit reached; closing socket
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n
  sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
  readline(24 from ~) -> (*) GET /?cmd=getbundle HTTP* (glob)
  read limit reached; closing socket
  readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
  readline(281 from *) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(254 from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(225 from *) -> (225) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtag (glob)
  read limit reached; closing socket

  $ rm -f error.log

Now do a variation using POST to send arguments
===============================================

  $ hg serve \
  > --config badserver.close-after-recv-patterns="x-hgargs-post:,user-agent: mercurial/proto-1.0" \
  > --config badserver.close-after-recv-bytes="14,26" \
  > --config experimental.httppostargs=true \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: bad HTTP status line: * (glob)
  [100]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (41) content-type: application/mercurial-0.1\r\n (glob)
  readline(*) -> (33) vary: X-HgArgs-Post,X-HgProto-1\r\n (glob)
  readline(*) -> (19) x-hgargs-post: 28\r\n (glob)
  readline(*) -> (1?) x-hgproto-1: * (glob)
  read limit reached; closing socket
  readline(~) -> (27) POST /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (41) content-type: application/mercurial-0.1\r\n (glob)
  readline(*) -> (33) vary: X-HgArgs-Post,X-HgProto-1\r\n (glob)
  readline(*) -> (19) x-hgargs-post: 28\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (20) content-length: 28\r\n (glob)
  readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  read(24 from 28) -> (*) cmds=* (glob)
  read limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
  Traceback (most recent call last):
  Exception: connection closed after receiving N bytes


  $ rm -f error.log
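
(With experimental.httppostargs enabled, the client moves the command
arguments out of x-hgarg-N request headers and into the POST body, announcing
their size in the x-hgargs-post header; note that x-hgargs-post and
content-length are both 28 above. A hand-rolled request of the same shape
might look like this sketch: the header names come from the log above, while
the host, port, and everything else is illustrative:)

  import http.client

  args = b'cmds=heads+%3Bknown+nodes%3D'  # URL-encoded command arguments
  conn = http.client.HTTPConnection('localhost', 8000)  # stand-in for $HGPORT
  conn.request(
      'POST',
      '/?cmd=batch',
      body=args,
      headers={
          'Accept': 'application/mercurial-0.1',
          'Content-Type': 'application/mercurial-0.1',
          'X-HgArgs-Post': str(len(args)),  # byte size of args in the body
      },
  )
  resp = conn.getresponse()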

Now move on to partial server responses
=======================================

Server sends a single character from the HTTP response line
-----------------------------------------------------------

  $ hg serve --config badserver.close-after-send-bytes=1 -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  abort: error: bad HTTP status line: H
  [100]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(1 from 160) -> (0) H
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes


  $ rm -f error.log

Server sends an incomplete capabilities response body
-----------------------------------------------------

  $ hg serve \
  > --config badserver.close-after-send-patterns='batch branchmap bund' \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  abort: HTTP request error (incomplete response; expected * bytes got 20) (glob)
  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
  [255]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(20 from *) -> (0) batch branchmap bund (glob)
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes


  $ rm -f error.log

Server sends incomplete headers for batch request
-------------------------------------------------

  $ hg serve \
  > --config badserver.close-after-send-patterns='(.*Content-Type: applicat){2}' \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

TODO this output is horrible

  $ hg clone http://localhost:$HGPORT/ clone
  abort: 'http://localhost:$HGPORT/' does not appear to be an hg repository:
  ---%<--- (applicat)

  ---%<---

  [255]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes


  $ rm -f error.log
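
(The `{2}` in the pattern above does the real work: the pattern is evidently
matched against everything sent so far, so `(.*Content-Type: applicat){2}`
only fires once a second Content-Type header goes out. The capabilities
response therefore completes, and it is the batch response that is cut short.
The same idea in plain `re`, assuming DOTALL-style matching over the
accumulated bytes:)

  import re

  # Fires only once two Content-Type headers appear in the sent bytes.
  pattern = re.compile(rb'(.*Content-Type: applicat){2}', re.DOTALL)

  sent = b'HTTP/1.1 200 ...Content-Type: application/mercurial-0.1...'
  assert pattern.search(sent) is None      # first response: no match yet

  sent += b'HTTP/1.1 200 ...Content-Type: applicat'
  assert pattern.search(sent) is not None  # second header: cut here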

Server sends an incomplete HTTP response body to batch request
--------------------------------------------------------------

  $ hg serve \
  > --config badserver.close-after-send-patterns=96ee1d7354c4ad7372047672 \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  abort: unexpected response:
  '96ee1d7354c4ad7372047672'
  [255]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n
  sendall(24 from 42) -> (0) 96ee1d7354c4ad7372047672
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes


  $ rm -f error.log

Server sends incomplete headers for getbundle response
------------------------------------------------------

  $ hg serve \
  > --config badserver.close-after-send-patterns='(.*Content-Type: application/mercuri){3}' \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

TODO this output is terrible

  $ hg clone http://localhost:$HGPORT/ clone
  requesting all changes
  abort: 'http://localhost:$HGPORT/' does not appear to be an hg repository:
  ---%<--- (application/mercuri)

  ---%<---

  [255]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n
  sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
  readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (447) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes


  $ rm -f error.log

Server stops before it sends transfer encoding
----------------------------------------------

  $ hg serve \
  > --config badserver.close-after-send-patterns="Transfer-Encoding: chunke" \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  requesting all changes
  abort: stream ended unexpectedly (got 0 bytes, expected 1)
  [255]

  $ killdaemons.py $DAEMON_PIDS

  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -6
  sendall(162 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunke
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes

  $ rm -f error.log

Server sends empty HTTP body for getbundle
------------------------------------------

  $ hg serve \
  > --config badserver.close-after-send-patterns='Transfer-Encoding: chunked\r\n\r\n' \
  > -p $HGPORT -d --pid-file=hg.pid -E error.log
  $ cat hg.pid > $DAEMON_PIDS

  $ hg clone http://localhost:$HGPORT/ clone
  requesting all changes
  abort: HTTP request error (incomplete response)
  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
  [255]

  $ killdaemons.py $DAEMON_PIDS

  $ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
  readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
  sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
  readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n
  sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
  readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
  readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
  readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
  readline(*) -> (447) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
  readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
  readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
  readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
  readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
  readline(*) -> (2) \r\n (glob)
  sendall(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
  write limit reached; closing socket
  $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
  Traceback (most recent call last):
  Exception: connection closed after sending N bytes


  $ rm -f error.log
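
(For the next test, recall HTTP chunked framing: each chunk is a hexadecimal
size line, CRLF, that many payload bytes, then CRLF, and a complete body ends
with a zero-size chunk. So the pattern `4\r\nHG20\r\n` is one well-formed
4-byte chunk carrying `HG20`, the bundle2 stream marker; cutting right after
it leaves valid HTTP framing but a truncated bundle. Illustrative values:)

  chunk = b'4\r\n' + b'HG20' + b'\r\n'  # size line, payload, trailing CRLF
  terminator = b'0\r\n\r\n'             # what a complete body would end with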
543
543
544 Server sends partial compression string
544 Server sends partial compression string
545 ---------------------------------------
545 ---------------------------------------
546
546
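The close-after-send-patterns values in the scenarios below appear to be matched against the raw bytes the server writes, so they include the HTTP/1.1 chunked transfer-coding framing that wraps the bundle2 stream. As a minimal sketch (mine, not part of the test suite), assuming plain RFC 7230 chunk rules: each chunk is the payload size in hex, CRLF, the payload, CRLF, and a zero-size chunk ends the body.

def encode_chunk(payload: bytes) -> bytes:
    # RFC 7230 chunked framing: "<size in hex>\r\n<payload>\r\n"
    return b"%x\r\n%s\r\n" % (len(payload), payload)

assert encode_chunk(b"HG20") == b"4\r\nHG20\r\n"  # the pattern used just below
assert encode_chunk(b"") == b"0\r\n\r\n"          # terminator of a chunked body
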
$ hg serve \
> --config badserver.close-after-send-patterns='4\r\nHG20\r\n' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
abort: HTTP request error (incomplete response)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob)
sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob)
readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n
sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (447) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9 from 9) -> (0) 4\r\nHG20\r\n
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes


$ rm -f error.log

Server sends partial bundle2 header magic
-----------------------------------------

$ hg serve \
> --config badserver.close-after-send-patterns='4\r\nHG2' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
abort: HTTP request error (incomplete response*) (glob)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -9
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(6 from 9) -> (0) 4\r\nHG2
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server sends incomplete bundle2 stream params length
----------------------------------------------------

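The pattern here ('4\r\n\0\0\0') cuts the connection partway through the four-byte big-endian length that follows the HG20 magic and announces the size of the stream-level parameters block. A hedged sketch of a reader for that prefix (illustrative only; the real parser lives in Mercurial's bundle2 module):

import struct

def read_stream_prefix(fh):
    magic = fh.read(4)
    if magic != b"HG20":
        raise ValueError("unexpected bundle2 magic: %r" % magic)
    # four-byte big-endian length of the stream parameters block
    (paramssize,) = struct.unpack(">I", fh.read(4))
    return fh.read(paramssize)  # empty in these tests, hence \0\0\0\0
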
$ hg serve \
> --config badserver.close-after-send-patterns='4\r\n\0\0\0' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
abort: HTTP request error (incomplete response*) (glob)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -10
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(6 from 9) -> (0) 4\\r\\n\x00\x00\x00 (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops after bundle2 stream params header
-----------------------------------------------

$ hg serve \
> --config badserver.close-after-send-patterns='4\r\n\0\0\0\0\r\n' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
abort: HTTP request error (incomplete response)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -10
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops sending after bundle2 part header length
-----------------------------------------------------

$ hg serve \
> --config badserver.close-after-send-patterns='4\r\n\0\0\0\)\r\n' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
abort: HTTP request error (incomplete response)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -11
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops sending after bundle2 part header
----------------------------------------------

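The 47-byte frame in the log below carries one complete bundle2 part header: a one-byte part-type length, the type itself (CHANGEGROUP), a four-byte part id, one-byte counts of mandatory and advisory parameters, then the parameter sizes and strings ('version' = '03', 'nbchanges' = '1'). A rough decoder for the leading fields, assuming that layout (a sketch, not Mercurial's actual code):

import io
import struct

def read_part_prefix(blob: bytes):
    fh = io.BytesIO(blob)
    (typelen,) = struct.unpack(">B", fh.read(1))
    parttype = fh.read(typelen)                # e.g. b"CHANGEGROUP"
    (partid,) = struct.unpack(">I", fh.read(4))
    mancount, advcount = struct.unpack(">BB", fh.read(2))
    # parameter size pairs and the parameter data follow
    return parttype, partid, mancount, advcount
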
$ hg serve \
> --config badserver.close-after-send-patterns="version03nbchanges1\\r\\n" \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
adding changesets
abort: HTTP request error (incomplete response)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47 from 47) -> (0) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version03nbchanges1\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops after bundle2 part payload chunk size
--------------------------------------------------

$ hg serve \
> --config badserver.close-after-send-patterns='1dc\r\n.......' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
adding changesets
abort: HTTP request error (incomplete response*) (glob)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version03nbchanges1\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x01\xdc\\r\\n (esc)
sendall(12 from 483) -> (0) 1dc\\r\\n\x00\x00\x00\xb4\x96\xee\x1d (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops sending in middle of bundle2 payload chunk
-------------------------------------------------------

$ hg serve \
> --config badserver.close-after-send-patterns=':jL\0\0\x00\0\0\0\0\0\0\0\r\n' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
adding changesets
abort: HTTP request error (incomplete response)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version03nbchanges1\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x01\xdc\\r\\n (esc)
sendall(483 from 483) -> (0) 1dc\\r\\n\x00\x00\x00\xb4\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa3j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00j\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops sending after 0 length payload chunk size
------------------------------------------------------

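Within a part, the payload itself is streamed as chunks, each preceded by a four-byte big-endian size (the \x00\x00\x01\xdc frame above announces 476 bytes); a zero size, the \x00\x00\x00\x00 frames in these logs, ends the part. A minimal sketch of a consumer, under that assumption:

import struct

def iter_payload_chunks(fh):
    while True:
        (size,) = struct.unpack(">I", fh.read(4))
        if size == 0:      # end-of-payload marker
            return
        yield fh.read(size)
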
$ hg serve \
> --config badserver.close-after-send-patterns=LISTKEYS \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
adding changesets
adding manifests
adding file changes
transaction abort!
rollback completed
abort: HTTP request error (incomplete response*) (glob)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -16
sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
sendall(9) -> 4\r\nnone\r\n
sendall(9) -> 4\r\nHG20\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version03nbchanges1\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x01\xdc\\r\\n (esc)
sendall(483) -> 1dc\\r\\n\x00\x00\x00\xb4\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa3j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00j\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
sendall(13 from 38) -> (0) 20\\r\\n\x08LISTKEYS (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log

Server stops sending after zero-size bundle2 part header (indicating end of bundle2 payload)
--------------------------------------------------------------------------------------------

This is before the 0 size chunked transfer part that signals the end of the HTTP response.

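As I read the log below, the {5} in the pattern counts the five zero frames the server sends: the empty stream-params length, one end-of-payload marker for each of the three parts (the changegroup and the two listkeys replies), and finally the zero-size part header that terminates the bundle2 stream. That last frame is nothing but four NUL bytes in the same big-endian framing sketched above:

import struct

END_OF_BUNDLE2_STREAM = struct.pack(">I", 0)  # b"\x00\x00\x00\x00": no more parts
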
$ hg serve \
> --config badserver.close-after-send-patterns='(.*4\r\n\0\0\0\0\r\n){5}' \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 96ee1d7354c4
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -20
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version03nbchanges1\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x01\xdc\\r\\n (esc)
sendall(483) -> 1dc\\r\\n\x00\x00\x00\xb4\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa3j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00j\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
sendall(38) -> 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00:\\r\\n (esc)
sendall(64) -> 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00#\\r\\n (esc)
sendall(41) -> 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log
$ rm -rf clone

Server sends a size 0 chunked-transfer size without terminating \r\n
--------------------------------------------------------------------

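In this final scenario the server delivers the entire bundle2 stream and the zero-size chunk header, but drops the trailing CRLF of the chunked-transfer terminator: it sends '0\r\n' instead of '0\r\n\r\n' (the "3 from 5" bytes in the log below). Since the client already holds the complete payload, the clone succeeds and only the server side records an exception. A tiny illustrative check, assuming RFC 7230 framing:

full_terminator = b"0\r\n\r\n"  # zero-size chunk header plus the final CRLF
sent = b"0\r\n"                 # what the server manages to write here
assert full_terminator.startswith(sent) and len(sent) == 3
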
$ hg serve \
> --config badserver.close-after-send-patterns="(.*4\\r\\n\0\0\0\0\\r\\n0\r\n)" \
> -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS

$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 96ee1d7354c4
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ killdaemons.py $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version03nbchanges1\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x01\xdc\\r\\n (esc)
sendall(483) -> 1dc\\r\\n\x00\x00\x00\xb4\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa3j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00j\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
sendall(38) -> 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00:\\r\\n (esc)
sendall(64) -> 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00#\\r\\n (esc)
sendall(41) -> 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(3 from 5) -> (0) 0\r\n
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes

$ rm -f error.log
$ rm -rf clone