debug: add an option to display statistics about an unbundling operation

marmoute - changeset 50506:35d4c212 (default)
@@ -1,2225 +1,2425 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import open

from . import (
    error,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    requirements,
    scmutil,
    util,
)

from .interfaces import repository
from .revlogutils import sidedata as sidedatamod
from .revlogutils import constants as revlog_constants
from .utils import storageutil

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
_CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH")

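As a minimal sketch of how these header structs are consumed (hypothetical header bytes, not part of the change): a cg1 delta header packs four 20-byte nodes, namely the revision, its two parents, and the linked changeset.

    # hypothetical 80-byte cg1 header
    hdr = b'\x11' * 20 + b'\x22' * 20 + b'\x33' * 20 + b'\x44' * 20
    node, p1, p2, cs = _CHANGEGROUPV1_DELTA_HEADER.unpack(hdr)
    assert node == b'\x11' * 20
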
LFS_REQUIREMENT = b'lfs'

readexactly = util.readexactly


def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(b">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_(b"invalid chunk length %d") % l)
        return b""
    return readexactly(stream, l - 4)


def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(b">l", length + 4)


def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(b">l", 0)

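A minimal round-trip sketch of the chunk framing above, assuming an in-memory stream (not part of the module): the 4-byte length prefix counts itself, and a zero-length chunk terminates a series.

    import io

    payload = b'hello'
    framed = chunkheader(len(payload)) + payload + closechunk()
    stream = io.BytesIO(framed)
    assert getchunk(stream) == payload  # length-prefixed payload
    assert getchunk(stream) == b""      # zero-length chunk ends the series
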
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    return chunkheader(len(path)) + path


def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

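A hedged usage sketch for writechunks (hypothetical chunk values): passing filename=None asks it to create a temporary hg-bundle-*.hg file and return that name.

    # tmpname = writechunks(ui, iter([b'chunk-1', b'chunk-2']), None)
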
+def _dbg_ubdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+):
+    """Print one line of display_unbundle_debug_info"""
+    line = b"DEBUG-UNBUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    padding = b''
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        if isinstance(base_value, float):
+            line += b"%14.3f seconds" % base_value
+        else:
+            line += b"%10d" % base_value
+            padding = b' '
+    else:
+        line += key
+
+    if percentage_base is not None:
+        line += padding
+        padding = b''
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%3d%% of %s)" % (
+                percentage,
+                percentage_key,
+            )
+        else:
+            line += b" (%3d%%)" % percentage
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def _sumf(items):
+    # python < 3.8 does not support a `start=0.0` argument to sum
+    # So we have to cheat a bit until we drop support for those versions
+    if not items:
+        return 0.0
+    return sum(items)
+
+
+def display_unbundle_debug_info(ui, debug_info):
+    """display an unbundling report from debug information"""
+    cl_info = []
+    mn_info = []
+    fl_info = []
+    _dispatch = [
+        (b'CHANGELOG:', cl_info),
+        (b'MANIFESTLOG:', mn_info),
+        (b'FILELOG:', fl_info),
+    ]
+    for e in debug_info:
+        for prefix, info in _dispatch:
+            if e["target-revlog"].startswith(prefix):
+                info.append(e)
+                break
+        else:
+            assert False, 'unreachable'
+    each_info = [
+        (b'changelog', cl_info),
+        (b'manifests', mn_info),
+        (b'files', fl_info),
+    ]
+
+    # General Revision Counts
+    _dbg_ubdl_line(ui, 0, b'revisions', len(debug_info))
+    for key, info in each_info:
+        if not info:
+            continue
+        _dbg_ubdl_line(ui, 1, key, len(info), len(debug_info))
+
+    # General Time spent
+    all_durations = [e['duration'] for e in debug_info]
+    all_durations.sort()
+    total_duration = _sumf(all_durations)
+    _dbg_ubdl_line(ui, 0, b'total-time', total_duration)
+
+    for key, info in each_info:
+        if not info:
+            continue
+        durations = [e['duration'] for e in info]
+        durations.sort()
+        _dbg_ubdl_line(ui, 1, key, _sumf(durations), total_duration)
+
+    # Count and cache reuse per delta types
+    each_types = {}
+    for key, info in each_info:
+        each_types[key] = types = {
+            b'full': 0,
+            b'full-cached': 0,
+            b'snapshot': 0,
+            b'snapshot-cached': 0,
+            b'delta': 0,
+            b'delta-cached': 0,
+            b'unknown': 0,
+            b'unknown-cached': 0,
+        }
+        for e in info:
+            types[e['type']] += 1
+            if e['using-cached-base']:
+                types[e['type'] + b'-cached'] += 1
+
+    EXPECTED_TYPES = (b'full', b'snapshot', b'delta', b'unknown')
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-count')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+            t = each_types[key]
+            for tn in EXPECTED_TYPES:
+                if t[tn]:
+                    tc = tn + b'-cached'
+                    _dbg_ubdl_line(ui, 2, tn, t[tn])
+                    _dbg_ubdl_line(ui, 3, b'cached', t[tc], t[tn])
+
+    # time performance per delta type and reuse
+    each_type_time = {}
+    for key, info in each_info:
+        each_type_time[key] = t = {
+            b'full': [],
+            b'full-cached': [],
+            b'snapshot': [],
+            b'snapshot-cached': [],
+            b'delta': [],
+            b'delta-cached': [],
+            b'unknown': [],
+            b'unknown-cached': [],
+        }
+        for e in info:
+            t[e['type']].append(e['duration'])
+            if e['using-cached-base']:
+                t[e['type'] + b'-cached'].append(e['duration'])
+        for t_key, value in list(t.items()):
+            value.sort()
+            t[t_key] = _sumf(value)
+
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-time')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+            t = each_type_time[key]
+            td = total_duration  # to save space on next lines
+            for tn in EXPECTED_TYPES:
+                if t[tn]:
+                    tc = tn + b'-cached'
+                    _dbg_ubdl_line(ui, 2, tn, t[tn], td, b"total")
+                    _dbg_ubdl_line(ui, 3, b'cached', t[tc], td, b"total")
+
+
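The report built above is only produced when the new 'debug.unbundling-stats' config knob is set, e.g. (illustrative invocation):

    $ hg unbundle bundle.hg --config debug.unbundling-stats=yes

Every line is prefixed with "DEBUG-UNBUNDLING: " and goes to stderr via ui.write_err(), so it does not pollute the command's normal output.
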
class cg1unpacker:
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != b'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(b">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_(b"invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {b'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        protocol_flags = 0
        return node, p1, p2, deltabase, cs, flags, protocol_flags

    def deltachunk(self, prevnode):
        # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags)
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        header = self._deltaheader(header, prevnode)
        node, p1, p2, deltabase, cs, flags, protocol_flags = header
        return node, p1, p2, cs, deltabase, delta, flags, {}, protocol_flags

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2 ** 20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+    ):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        storage = repo.manifestlog.getstorage(b'')
-        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
+        storage.addgroup(
+            deltas,
+            revmap,
+            trp,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+        )
        prog.complete()
        self.callback = None

    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
        sidedata_categories=None,
    ):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1

        `sidedata_categories` is an optional set of the remote's sidedata wanted
        categories.
        """
        repo = repo.unfiltered()

+        debug_info = None
+        if repo.ui.configbool(b'debug', b'unbundling-stats'):
+            debug_info = []
+
        # Only useful if we're adding sidedata categories. If both peers have
        # the same categories, then we simply don't do anything.
        adding_sidedata = (
            (
                requirements.REVLOGV2_REQUIREMENT in repo.requirements
                or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
            )
            and self.version == b'04'
            and srctype == b'pull'
        )
        if adding_sidedata:
            sidedata_helpers = sidedatamod.get_sidedata_helpers(
                repo,
                sidedata_categories or set(),
                pull=True,
            )
        else:
            sidedata_helpers = None

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            efilesset = set()
            duprevs = []

            def ondupchangelog(cl, rev):
                if rev < clstart:
                    duprevs.append(rev)  # pytype: disable=attribute-error

            def onchangelog(cl, rev):
                ctx = cl.changelogrevision(rev)
                assert efilesset is not None  # help pytype
                efilesset.update(ctx.files)
                repo.register_changeset(rev, ctx)

            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                alwayscache=True,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
+                debug_info=debug_info,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # Keep track of the (non-changelog) revlogs we've updated and their
            # range of new revisions for sidedata rewrite.
            # TODO do something more efficient than keeping the reference to
            # the revlogs, especially memory-wise.
            touched_manifests = {}
            touched_filelogs = {}

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            on_manifest_rev = None
            if sidedata_helpers:
                if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]:

                    def on_manifest_rev(manifest, rev):
                        range = touched_manifests.get(manifest)
                        if not range:
                            touched_manifests[manifest] = (rev, rev)
                        else:
                            assert rev == range[1] + 1
                            touched_manifests[manifest] = (range[0], rev)

            self._unpackmanifests(
                repo,
                revmap,
                trp,
                progress,
                addrevisioncb=on_manifest_rev,
+                debug_info=debug_info,
            )

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in range(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in mfest.items():
                        needfiles.setdefault(f, set()).add(n)

            on_filelog_rev = None
            if sidedata_helpers:
                if revlog_constants.KIND_FILELOG in sidedata_helpers[1]:

                    def on_filelog_rev(filelog, rev):
                        range = touched_filelogs.get(filelog)
                        if not range:
                            touched_filelogs[filelog] = (rev, rev)
                        else:
                            assert rev == range[1] + 1
                            touched_filelogs[filelog] = (range[0], rev)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo,
                self,
                revmap,
                trp,
                efiles,
                needfiles,
                addrevisioncb=on_filelog_rev,
+                debug_info=debug_info,
            )

            if sidedata_helpers:
                if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]:
                    cl.rewrite_sidedata(
                        trp, sidedata_helpers, clstart, clend - 1
                    )
                for mf, (startrev, endrev) in touched_manifests.items():
                    mf.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
                for fl, (startrev, endrev) in touched_filelogs.items():
                    fl.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)

            # making sure the value exists
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # some code uses bundle operations for internal purposes. They
            # usually set `ui.quiet` to do this outside of user sight. Since
            # the report of such operations now happens at the end of the
            # transaction, ui.quiet has no direct effect on the output.
            #
            # To preserve this intent we use an inelegant hack: we fail to
            # report the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the "end
            # of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            added = range(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                if duprevs:
                    duprevs.extend(added)
                else:
                    duprevs = added
                phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
                duprevs = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
+            if debug_info is not None:
+                display_unbundle_debug_info(repo.ui, debug_info)
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags)
            yield chunkdata[:8]
            chain = chunkdata[0]

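The deltaiter() loop above leans on Python's two-argument iter(): deltachunk() is called repeatedly until it returns the empty dict produced by a zero-length chunk, which compares equal to the {} sentinel. An equivalent spelled-out sketch:

    chain = None
    while True:
        chunkdata = self.deltachunk(chain)
        if chunkdata == {}:  # empty chunk ends the group
            break
        yield chunkdata[:8]  # drop the trailing protocol flags
        chain = chunkdata[0]  # the next delta may use this node as its base
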
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        protocol_flags = 0
        return node, p1, p2, deltabase, cs, flags, protocol_flags


class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        protocol_flags = 0
        return node, p1, p2, deltabase, cs, flags, protocol_flags

-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+    ):
        super(cg3unpacker, self)._unpackmanifests(
-            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
+            repo,
+            revmap,
+            trp,
+            prog,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
        )
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(
-                deltas, revmap, trp, addrevisioncb=addrevisioncb
+                deltas,
+                revmap,
+                trp,
+                addrevisioncb=addrevisioncb,
+                debug_info=debug_info,
            ):
                raise error.Abort(_(b"received dir revlog group is empty"))

class cg4unpacker(cg3unpacker):
    """Unpacker for cg4 streams.

    cg4 streams add support for exchanging sidedata.
    """

    deltaheader = _CHANGEGROUPV4_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'04'

    def _deltaheader(self, headertuple, prevnode):
        protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags, protocol_flags

    def deltachunk(self, prevnode):
        res = super(cg4unpacker, self).deltachunk(prevnode)
        if not res:
            return res

        (
            node,
            p1,
            p2,
            cs,
            deltabase,
            delta,
            flags,
            sidedata,
            protocol_flags,
        ) = res
        assert not sidedata

        sidedata = {}
        if protocol_flags & storageutil.CG_FLAG_SIDEDATA:
            sidedata_raw = getchunk(self._stream)
            sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)

        return (
            node,
            p1,
            p2,
            cs,
            deltabase,
            delta,
            flags,
            sidedata,
            protocol_flags,
        )


class headerlessfixup:
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

def _revisiondeltatochunks(repo, delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == repo.nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data

    if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA:
        # Need a separate chunk for sidedata to be able to differentiate
        # "raw delta" length and sidedata length
        sidedata = delta.sidedata
        yield chunkheader(len(sidedata))
        yield sidedata

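A sketch of the "rewrite everything" trick used above for full revisions, assuming a hypothetical revision text: a bdiff hunk header is three big-endian 32-bit integers (start, end, new length), so replacing the empty range [0, 0) of the base with the complete text reconstructs the revision from any base.

    import struct

    text = b'full revision text'
    # mdiff.trivialdiffheader(len(text)) produces this "replace nothing
    # with the whole text" hunk header
    prefix = struct.pack(b">lll", 0, 0, len(text))
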
def _sortnodesellipsis(store, nodes, cl, lookup):
    """Sort nodes for changegroup generation."""
    # Ellipses serving mode.
    #
    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    key = lambda n: cl.rev(lookup(n))
    return sorted(nodes, key=key)

764 def _resolvenarrowrevisioninfo(
962 def _resolvenarrowrevisioninfo(
765 cl,
963 cl,
766 store,
964 store,
767 ischangelog,
965 ischangelog,
768 rev,
966 rev,
769 linkrev,
967 linkrev,
770 linknode,
968 linknode,
771 clrevtolocalrev,
969 clrevtolocalrev,
772 fullclnodes,
970 fullclnodes,
773 precomputedellipsis,
971 precomputedellipsis,
774 ):
972 ):
775 linkparents = precomputedellipsis[linkrev]
973 linkparents = precomputedellipsis[linkrev]
776
974
777 def local(clrev):
975 def local(clrev):
778 """Turn a changelog revnum into a local revnum.
976 """Turn a changelog revnum into a local revnum.
779
977
780 The ellipsis dag is stored as revnums on the changelog,
978 The ellipsis dag is stored as revnums on the changelog,
781 but when we're producing ellipsis entries for
979 but when we're producing ellipsis entries for
782 non-changelog revlogs, we need to turn those numbers into
980 non-changelog revlogs, we need to turn those numbers into
783 something local. This does that for us, and during the
981 something local. This does that for us, and during the
784 changelog sending phase will also expand the stored
982 changelog sending phase will also expand the stored
785 mappings as needed.
983 mappings as needed.
786 """
984 """
787 if clrev == nullrev:
985 if clrev == nullrev:
788 return nullrev
986 return nullrev
789
987
790 if ischangelog:
988 if ischangelog:
791 return clrev
989 return clrev
792
990
793 # Walk the ellipsis-ized changelog breadth-first looking for a
991 # Walk the ellipsis-ized changelog breadth-first looking for a
794 # change that has been linked from the current revlog.
992 # change that has been linked from the current revlog.
795 #
993 #
796 # For a flat manifest revlog only a single step should be necessary
994 # For a flat manifest revlog only a single step should be necessary
797 # as all relevant changelog entries are relevant to the flat
995 # as all relevant changelog entries are relevant to the flat
798 # manifest.
996 # manifest.
799 #
997 #
800 # For a filelog or tree manifest dirlog however not every changelog
998 # For a filelog or tree manifest dirlog however not every changelog
801 # entry will have been relevant, so we need to skip some changelog
999 # entry will have been relevant, so we need to skip some changelog
802 # nodes even after ellipsis-izing.
1000 # nodes even after ellipsis-izing.
803 walk = [clrev]
1001 walk = [clrev]
804 while walk:
1002 while walk:
805 p = walk[0]
1003 p = walk[0]
806 walk = walk[1:]
1004 walk = walk[1:]
807 if p in clrevtolocalrev:
1005 if p in clrevtolocalrev:
808 return clrevtolocalrev[p]
1006 return clrevtolocalrev[p]
809 elif p in fullclnodes:
1007 elif p in fullclnodes:
810 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
1008 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
811 elif p in precomputedellipsis:
1009 elif p in precomputedellipsis:
812 walk.extend(
1010 walk.extend(
813 [pp for pp in precomputedellipsis[p] if pp != nullrev]
1011 [pp for pp in precomputedellipsis[p] if pp != nullrev]
814 )
1012 )
815 else:
1013 else:
816 # In this case, we've got an ellipsis with parents
1014 # In this case, we've got an ellipsis with parents
817 # outside the current bundle (likely an
1015 # outside the current bundle (likely an
818 # incremental pull). We "know" that we can use the
1016 # incremental pull). We "know" that we can use the
819 # value of this same revlog at whatever revision
1017 # value of this same revlog at whatever revision
820 # is pointed to by linknode. "Know" is in scare
1018 # is pointed to by linknode. "Know" is in scare
821 # quotes because I haven't done enough examination
1019 # quotes because I haven't done enough examination
822 # of edge cases to convince myself this is really
1020 # of edge cases to convince myself this is really
823 # a fact - it works for all the (admittedly
1021 # a fact - it works for all the (admittedly
824 # thorough) cases in our testsuite, but I would be
1022 # thorough) cases in our testsuite, but I would be
825 # somewhat unsurprised to find a case in the wild
1023 # somewhat unsurprised to find a case in the wild
826 # where this breaks down a bit. That said, I don't
1024 # where this breaks down a bit. That said, I don't
827 # know if it would hurt anything.
1025 # know if it would hurt anything.
828 for i in range(rev, 0, -1):
1026 for i in range(rev, 0, -1):
829 if store.linkrev(i) == clrev:
1027 if store.linkrev(i) == clrev:
830 return i
1028 return i
831 # We failed to resolve a parent for this node, so
1029 # We failed to resolve a parent for this node, so
832 # we crash the changegroup construction.
1030 # we crash the changegroup construction.
833 if util.safehasattr(store, 'target'):
1031 if util.safehasattr(store, 'target'):
834 target = store.display_id
1032 target = store.display_id
835 else:
1033 else:
836 # some revlog not actually a revlog
1034 # some revlog not actually a revlog
837 target = store._revlog.display_id
1035 target = store._revlog.display_id
838
1036
839 raise error.Abort(
1037 raise error.Abort(
840 b"unable to resolve parent while packing '%s' %r"
1038 b"unable to resolve parent while packing '%s' %r"
841 b' for changeset %r' % (target, rev, clrev)
1039 b' for changeset %r' % (target, rev, clrev)
842 )
1040 )
843
1041
844 return nullrev
1042 return nullrev
845
1043
846 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
1044 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
847 p1, p2 = nullrev, nullrev
1045 p1, p2 = nullrev, nullrev
848 elif len(linkparents) == 1:
1046 elif len(linkparents) == 1:
849 (p1,) = sorted(local(p) for p in linkparents)
1047 (p1,) = sorted(local(p) for p in linkparents)
850 p2 = nullrev
1048 p2 = nullrev
851 else:
1049 else:
852 p1, p2 = sorted(local(p) for p in linkparents)
1050 p1, p2 = sorted(local(p) for p in linkparents)
853
1051
854 p1node, p2node = store.node(p1), store.node(p2)
1052 p1node, p2node = store.node(p1), store.node(p2)
855
1053
856 return p1node, p2node, linknode
1054 return p1node, p2node, linknode
857
1055
858
1056
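# A minimal illustrative sketch of the linkrev fallback used above in
# _resolvenarrowrevisioninfo(): scan local revisions backwards until one
# whose linkrev matches the wanted changelog revision is found. FakeStore
# and its sample data are hypothetical, not part of this module.
class FakeStore:
    def __init__(self, linkrevs):
        self._linkrevs = linkrevs  # local rev -> changelog rev

    def linkrev(self, rev):
        return self._linkrevs[rev]


def resolve_local_rev(store, rev, clrev):
    # mirrors `for i in range(rev, 0, -1)` above: rev 0 is never checked,
    # matching the original code, which aborts when nothing matches
    for i in range(rev, 0, -1):
        if store.linkrev(i) == clrev:
            return i
    return None


assert resolve_local_rev(FakeStore({3: 7, 2: 5, 1: 2}), 3, 5) == 2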
859 def deltagroup(
1057 def deltagroup(
860 repo,
1058 repo,
861 store,
1059 store,
862 nodes,
1060 nodes,
863 ischangelog,
1061 ischangelog,
864 lookup,
1062 lookup,
865 forcedeltaparentprev,
1063 forcedeltaparentprev,
866 topic=None,
1064 topic=None,
867 ellipses=False,
1065 ellipses=False,
868 clrevtolocalrev=None,
1066 clrevtolocalrev=None,
869 fullclnodes=None,
1067 fullclnodes=None,
870 precomputedellipsis=None,
1068 precomputedellipsis=None,
871 sidedata_helpers=None,
1069 sidedata_helpers=None,
872 debug_info=None,
1070 debug_info=None,
873 ):
1071 ):
874 """Calculate deltas for a set of revisions.
1072 """Calculate deltas for a set of revisions.
875
1073
876 Is a generator of ``revisiondelta`` instances.
1074 Is a generator of ``revisiondelta`` instances.
877
1075
878 If topic is not None, progress detail will be generated using this
1076 If topic is not None, progress detail will be generated using this
879 topic name (e.g. changesets, manifests, etc).
1077 topic name (e.g. changesets, manifests, etc).
880
1078
881 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
1079 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
882 `sidedata_helpers`.
1080 `sidedata_helpers`.
883 """
1081 """
884 if not nodes:
1082 if not nodes:
885 return
1083 return
886
1084
887 cl = repo.changelog
1085 cl = repo.changelog
888
1086
889 if ischangelog:
1087 if ischangelog:
890 # `hg log` shows changesets in storage order. To preserve order
1088 # `hg log` shows changesets in storage order. To preserve order
891 # across clones, send out changesets in storage order.
1089 # across clones, send out changesets in storage order.
892 nodesorder = b'storage'
1090 nodesorder = b'storage'
893 elif ellipses:
1091 elif ellipses:
894 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
1092 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
895 nodesorder = b'nodes'
1093 nodesorder = b'nodes'
896 else:
1094 else:
897 nodesorder = None
1095 nodesorder = None
898
1096
899 # Perform ellipses filtering and revision massaging. We do this before
1097 # Perform ellipses filtering and revision massaging. We do this before
900 # emitrevisions() because a) filtering out revisions creates less work
1098 # emitrevisions() because a) filtering out revisions creates less work
901 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
1099 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
902 # assumptions about delta choices and we would possibly send a delta
1100 # assumptions about delta choices and we would possibly send a delta
903 # referencing a missing base revision.
1101 # referencing a missing base revision.
904 #
1102 #
905 # Also, calling lookup() has side-effects with regard to populating
1103 # Also, calling lookup() has side-effects with regard to populating
906 # data structures. If we don't call lookup() for each node or if we call
1104 # data structures. If we don't call lookup() for each node or if we call
907 # lookup() after the first pass through each node, things can break -
1105 # lookup() after the first pass through each node, things can break -
908 # possibly intermittently depending on the python hash seed! For that
1106 # possibly intermittently depending on the python hash seed! For that
909 # reason, we store a mapping of all linknodes during the initial node
1107 # reason, we store a mapping of all linknodes during the initial node
910 # pass rather than use lookup() on the output side.
1108 # pass rather than use lookup() on the output side.
911 if ellipses:
1109 if ellipses:
912 filtered = []
1110 filtered = []
913 adjustedparents = {}
1111 adjustedparents = {}
914 linknodes = {}
1112 linknodes = {}
915
1113
916 for node in nodes:
1114 for node in nodes:
917 rev = store.rev(node)
1115 rev = store.rev(node)
918 linknode = lookup(node)
1116 linknode = lookup(node)
919 linkrev = cl.rev(linknode)
1117 linkrev = cl.rev(linknode)
920 clrevtolocalrev[linkrev] = rev
1118 clrevtolocalrev[linkrev] = rev
921
1119
922 # If linknode is in fullclnodes, it means the corresponding
1120 # If linknode is in fullclnodes, it means the corresponding
923 # changeset was a full changeset and is being sent unaltered.
1121 # changeset was a full changeset and is being sent unaltered.
924 if linknode in fullclnodes:
1122 if linknode in fullclnodes:
925 linknodes[node] = linknode
1123 linknodes[node] = linknode
926
1124
927 # If the corresponding changeset wasn't in the set computed
1125 # If the corresponding changeset wasn't in the set computed
928 # as relevant to us, it should be dropped outright.
1126 # as relevant to us, it should be dropped outright.
929 elif linkrev not in precomputedellipsis:
1127 elif linkrev not in precomputedellipsis:
930 continue
1128 continue
931
1129
932 else:
1130 else:
933 # We could probably do this later and avoid the dict
1131 # We could probably do this later and avoid the dict
934 # holding state. But it likely doesn't matter.
1132 # holding state. But it likely doesn't matter.
935 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
1133 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
936 cl,
1134 cl,
937 store,
1135 store,
938 ischangelog,
1136 ischangelog,
939 rev,
1137 rev,
940 linkrev,
1138 linkrev,
941 linknode,
1139 linknode,
942 clrevtolocalrev,
1140 clrevtolocalrev,
943 fullclnodes,
1141 fullclnodes,
944 precomputedellipsis,
1142 precomputedellipsis,
945 )
1143 )
946
1144
947 adjustedparents[node] = (p1node, p2node)
1145 adjustedparents[node] = (p1node, p2node)
948 linknodes[node] = linknode
1146 linknodes[node] = linknode
949
1147
950 filtered.append(node)
1148 filtered.append(node)
951
1149
952 nodes = filtered
1150 nodes = filtered
953
1151
954 # We expect the first pass to be fast, so we only engage the progress
1152 # We expect the first pass to be fast, so we only engage the progress
955 # meter for constructing the revision deltas.
1153 # meter for constructing the revision deltas.
956 progress = None
1154 progress = None
957 if topic is not None:
1155 if topic is not None:
958 progress = repo.ui.makeprogress(
1156 progress = repo.ui.makeprogress(
959 topic, unit=_(b'chunks'), total=len(nodes)
1157 topic, unit=_(b'chunks'), total=len(nodes)
960 )
1158 )
961
1159
962 configtarget = repo.ui.config(b'devel', b'bundle.delta')
1160 configtarget = repo.ui.config(b'devel', b'bundle.delta')
963 if configtarget not in (b'', b'p1', b'full'):
1161 if configtarget not in (b'', b'p1', b'full'):
964 msg = _(b"""config "devel.bundle.delta" has unknown value: %s""")
1162 msg = _(b"""config "devel.bundle.delta" has unknown value: %s""")
965 repo.ui.warn(msg % configtarget)
1163 repo.ui.warn(msg % configtarget)
966
1164
967 deltamode = repository.CG_DELTAMODE_STD
1165 deltamode = repository.CG_DELTAMODE_STD
968 if forcedeltaparentprev:
1166 if forcedeltaparentprev:
969 deltamode = repository.CG_DELTAMODE_PREV
1167 deltamode = repository.CG_DELTAMODE_PREV
970 elif configtarget == b'p1':
1168 elif configtarget == b'p1':
971 deltamode = repository.CG_DELTAMODE_P1
1169 deltamode = repository.CG_DELTAMODE_P1
972 elif configtarget == b'full':
1170 elif configtarget == b'full':
973 deltamode = repository.CG_DELTAMODE_FULL
1171 deltamode = repository.CG_DELTAMODE_FULL
974
1172
975 revisions = store.emitrevisions(
1173 revisions = store.emitrevisions(
976 nodes,
1174 nodes,
977 nodesorder=nodesorder,
1175 nodesorder=nodesorder,
978 revisiondata=True,
1176 revisiondata=True,
979 assumehaveparentrevisions=not ellipses,
1177 assumehaveparentrevisions=not ellipses,
980 deltamode=deltamode,
1178 deltamode=deltamode,
981 sidedata_helpers=sidedata_helpers,
1179 sidedata_helpers=sidedata_helpers,
982 debug_info=debug_info,
1180 debug_info=debug_info,
983 )
1181 )
984
1182
985 for i, revision in enumerate(revisions):
1183 for i, revision in enumerate(revisions):
986 if progress:
1184 if progress:
987 progress.update(i + 1)
1185 progress.update(i + 1)
988
1186
989 if ellipses:
1187 if ellipses:
990 linknode = linknodes[revision.node]
1188 linknode = linknodes[revision.node]
991
1189
992 if revision.node in adjustedparents:
1190 if revision.node in adjustedparents:
993 p1node, p2node = adjustedparents[revision.node]
1191 p1node, p2node = adjustedparents[revision.node]
994 revision.p1node = p1node
1192 revision.p1node = p1node
995 revision.p2node = p2node
1193 revision.p2node = p2node
996 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
1194 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
997
1195
998 else:
1196 else:
999 linknode = lookup(revision.node)
1197 linknode = lookup(revision.node)
1000
1198
1001 revision.linknode = linknode
1199 revision.linknode = linknode
1002 yield revision
1200 yield revision
1003
1201
1004 if progress:
1202 if progress:
1005 progress.complete()
1203 progress.complete()
1006
1204
1007
1205
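# A small sketch of the delta-mode selection performed inside deltagroup()
# above, rewritten as a pure function for clarity. The string return values
# stand in for the repository.CG_DELTAMODE_* constants and are assumptions
# for illustration only.
def pick_deltamode(forcedeltaparentprev, configtarget):
    if forcedeltaparentprev:
        return 'prev'  # changegroup v1 compatibility: delta against previous
    if configtarget == b'p1':
        return 'p1'  # devel.bundle.delta=p1
    if configtarget == b'full':
        return 'full'  # devel.bundle.delta=full
    return 'std'  # default: let the storage layer choose


assert pick_deltamode(True, b'full') == 'prev'  # the force flag wins
assert pick_deltamode(False, b'p1') == 'p1'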
1008 def make_debug_info():
1206 def make_debug_info():
1009 """ "build a "new" debug_info dictionnary
1207 """ "build a "new" debug_info dictionnary
1010
1208
1011 That dictionary can be used to gather information about the bundle process
1209 That dictionary can be used to gather information about the bundle process
1012 """
1210 """
1013 return {
1211 return {
1014 'revision-total': 0,
1212 'revision-total': 0,
1015 'revision-changelog': 0,
1213 'revision-changelog': 0,
1016 'revision-manifest': 0,
1214 'revision-manifest': 0,
1017 'revision-files': 0,
1215 'revision-files': 0,
1018 'file-count': 0,
1216 'file-count': 0,
1019 'merge-total': 0,
1217 'merge-total': 0,
1020 'available-delta': 0,
1218 'available-delta': 0,
1021 'available-full': 0,
1219 'available-full': 0,
1022 'delta-against-prev': 0,
1220 'delta-against-prev': 0,
1023 'delta-full': 0,
1221 'delta-full': 0,
1024 'delta-against-p1': 0,
1222 'delta-against-p1': 0,
1025 'denied-delta-candeltafn': 0,
1223 'denied-delta-candeltafn': 0,
1026 'denied-base-not-available': 0,
1224 'denied-base-not-available': 0,
1027 'reused-storage-delta': 0,
1225 'reused-storage-delta': 0,
1028 'computed-delta': 0,
1226 'computed-delta': 0,
1029 }
1227 }
1030
1228
1031
1229
1032 def merge_debug_info(base, other):
1230 def merge_debug_info(base, other):
1033 """merge the debug information from <other> into <base>
1231 """merge the debug information from <other> into <base>
1034
1232
1035 This function can be used to aggregate lower-level information into a higher-level summary.
1233 This function can be used to aggregate lower-level information into a higher-level summary.
1036 """
1234 """
1037 for key in (
1235 for key in (
1038 'revision-total',
1236 'revision-total',
1039 'revision-changelog',
1237 'revision-changelog',
1040 'revision-manifest',
1238 'revision-manifest',
1041 'revision-files',
1239 'revision-files',
1042 'merge-total',
1240 'merge-total',
1043 'available-delta',
1241 'available-delta',
1044 'available-full',
1242 'available-full',
1045 'delta-against-prev',
1243 'delta-against-prev',
1046 'delta-full',
1244 'delta-full',
1047 'delta-against-p1',
1245 'delta-against-p1',
1048 'denied-delta-candeltafn',
1246 'denied-delta-candeltafn',
1049 'denied-base-not-available',
1247 'denied-base-not-available',
1050 'reused-storage-delta',
1248 'reused-storage-delta',
1051 'computed-delta',
1249 'computed-delta',
1052 ):
1250 ):
1053 base[key] += other[key]
1251 base[key] += other[key]
1054
1252
1055
1253
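# A minimal sketch of how these two helpers compose, mirroring the way
# cgpacker.generate() below merges per-store statistics into a top-level
# dictionary; the sample counts are made up for illustration.
top = make_debug_info()
cl_info = make_debug_info()
cl_info['revision-total'] += 3  # pretend three changelog revisions were sent
merge_debug_info(top, cl_info)
# 'revision-changelog' is intentionally absent from merge_debug_info's key
# list; generate() assigns it explicitly after merging:
top['revision-changelog'] = cl_info['revision-total']
assert top['revision-total'] == 3 and top['revision-changelog'] == 3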
1056 _KEY_PART_WIDTH = 17
1254 _KEY_PART_WIDTH = 17
1057
1255
1058
1256
1059 def _dbg_bdl_line(
1257 def _dbg_bdl_line(
1060 ui,
1258 ui,
1061 indent,
1259 indent,
1062 key,
1260 key,
1063 base_value=None,
1261 base_value=None,
1064 percentage_base=None,
1262 percentage_base=None,
1065 percentage_key=None,
1263 percentage_key=None,
1066 percentage_ref=None,
1264 percentage_ref=None,
1067 extra=None,
1265 extra=None,
1068 ):
1266 ):
1069 """Print one line of debug_bundle_debug_info"""
1267 """Print one line of debug_bundle_debug_info"""
1070 line = b"DEBUG-BUNDLING: "
1268 line = b"DEBUG-BUNDLING: "
1071 line += b' ' * (2 * indent)
1269 line += b' ' * (2 * indent)
1072 key += b":"
1270 key += b":"
1073 if base_value is not None:
1271 if base_value is not None:
1074 assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
1272 assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
1075 line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
1273 line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
1076 line += b"%10d" % base_value
1274 line += b"%10d" % base_value
1077 else:
1275 else:
1078 line += key
1276 line += key
1079
1277
1080 if percentage_base is not None:
1278 if percentage_base is not None:
1081 assert base_value is not None
1279 assert base_value is not None
1082 percentage = base_value * 100 // percentage_base
1280 percentage = base_value * 100 // percentage_base
1083 if percentage_key is not None:
1281 if percentage_key is not None:
1084 line += b" (%d%% of %s %d)" % (
1282 line += b" (%d%% of %s %d)" % (
1085 percentage,
1283 percentage,
1086 percentage_key,
1284 percentage_key,
1087 percentage_ref,
1285 percentage_ref,
1088 )
1286 )
1089 else:
1287 else:
1090 line += b" (%d%%)" % percentage
1288 line += b" (%d%%)" % percentage
1091
1289
1092 if extra:
1290 if extra:
1093 line += b" "
1291 line += b" "
1094 line += extra
1292 line += extra
1095
1293
1096 line += b'\n'
1294 line += b'\n'
1097 ui.write_err(line)
1295 ui.write_err(line)
1098
1296
1099
1297
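# A quick sketch of the line format produced above, using a hypothetical
# stub standing in for the real ui object (which writes to stderr):
class StubUI:
    def write_err(self, data):
        print(data.decode('ascii'), end='')


_dbg_bdl_line(StubUI(), 1, b'changelog', 3, percentage_base=10)
# prints: DEBUG-BUNDLING:   changelog:              3 (30%)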
1100 def display_bundling_debug_info(
1298 def display_bundling_debug_info(
1101 ui,
1299 ui,
1102 debug_info,
1300 debug_info,
1103 cl_debug_info,
1301 cl_debug_info,
1104 mn_debug_info,
1302 mn_debug_info,
1105 fl_debug_info,
1303 fl_debug_info,
1106 ):
1304 ):
1107 """display debug information gathered during a bundling through `ui`"""
1305 """display debug information gathered during a bundling through `ui`"""
1108 d = debug_info
1306 d = debug_info
1109 c = cl_debug_info
1307 c = cl_debug_info
1110 m = mn_debug_info
1308 m = mn_debug_info
1111 f = fl_debug_info
1309 f = fl_debug_info
1112 all_info = [
1310 all_info = [
1113 (b"changelog", b"cl", c),
1311 (b"changelog", b"cl", c),
1114 (b"manifests", b"mn", m),
1312 (b"manifests", b"mn", m),
1115 (b"files", b"fl", f),
1313 (b"files", b"fl", f),
1116 ]
1314 ]
1117 _dbg_bdl_line(ui, 0, b'revisions', d['revision-total'])
1315 _dbg_bdl_line(ui, 0, b'revisions', d['revision-total'])
1118 _dbg_bdl_line(ui, 1, b'changelog', d['revision-changelog'])
1316 _dbg_bdl_line(ui, 1, b'changelog', d['revision-changelog'])
1119 _dbg_bdl_line(ui, 1, b'manifest', d['revision-manifest'])
1317 _dbg_bdl_line(ui, 1, b'manifest', d['revision-manifest'])
1120 extra = b'(for %d revlogs)' % d['file-count']
1318 extra = b'(for %d revlogs)' % d['file-count']
1121 _dbg_bdl_line(ui, 1, b'files', d['revision-files'], extra=extra)
1319 _dbg_bdl_line(ui, 1, b'files', d['revision-files'], extra=extra)
1122 if d['merge-total']:
1320 if d['merge-total']:
1123 _dbg_bdl_line(ui, 1, b'merge', d['merge-total'], d['revision-total'])
1321 _dbg_bdl_line(ui, 1, b'merge', d['merge-total'], d['revision-total'])
1124 for k, __, v in all_info:
1322 for k, __, v in all_info:
1125 if v['merge-total']:
1323 if v['merge-total']:
1126 _dbg_bdl_line(ui, 2, k, v['merge-total'], v['revision-total'])
1324 _dbg_bdl_line(ui, 2, k, v['merge-total'], v['revision-total'])
1127
1325
1128 _dbg_bdl_line(ui, 0, b'deltas')
1326 _dbg_bdl_line(ui, 0, b'deltas')
1129 _dbg_bdl_line(
1327 _dbg_bdl_line(
1130 ui,
1328 ui,
1131 1,
1329 1,
1132 b'from-storage',
1330 b'from-storage',
1133 d['reused-storage-delta'],
1331 d['reused-storage-delta'],
1134 percentage_base=d['available-delta'],
1332 percentage_base=d['available-delta'],
1135 percentage_key=b"available",
1333 percentage_key=b"available",
1136 percentage_ref=d['available-delta'],
1334 percentage_ref=d['available-delta'],
1137 )
1335 )
1138
1336
1139 if d['denied-delta-candeltafn']:
1337 if d['denied-delta-candeltafn']:
1140 _dbg_bdl_line(ui, 2, b'denied-fn', d['denied-delta-candeltafn'])
1338 _dbg_bdl_line(ui, 2, b'denied-fn', d['denied-delta-candeltafn'])
1141 for __, k, v in all_info:
1339 for __, k, v in all_info:
1142 if v['denied-delta-candeltafn']:
1340 if v['denied-delta-candeltafn']:
1143 _dbg_bdl_line(ui, 3, k, v['denied-delta-candeltafn'])
1341 _dbg_bdl_line(ui, 3, k, v['denied-delta-candeltafn'])
1144
1342
1145 if d['denied-base-not-available']:
1343 if d['denied-base-not-available']:
1146 _dbg_bdl_line(ui, 2, b'denied-nb', d['denied-base-not-available'])
1344 _dbg_bdl_line(ui, 2, b'denied-nb', d['denied-base-not-available'])
1147 for k, __, v in all_info:
1345 for k, __, v in all_info:
1148 if v['denied-base-not-available']:
1346 if v['denied-base-not-available']:
1149 _dbg_bdl_line(ui, 3, k, v['denied-base-not-available'])
1347 _dbg_bdl_line(ui, 3, k, v['denied-base-not-available'])
1150
1348
1151 if d['computed-delta']:
1349 if d['computed-delta']:
1152 _dbg_bdl_line(ui, 1, b'computed', d['computed-delta'])
1350 _dbg_bdl_line(ui, 1, b'computed', d['computed-delta'])
1153
1351
1154 if d['available-full']:
1352 if d['available-full']:
1155 _dbg_bdl_line(
1353 _dbg_bdl_line(
1156 ui,
1354 ui,
1157 2,
1355 2,
1158 b'full',
1356 b'full',
1159 d['delta-full'],
1357 d['delta-full'],
1160 percentage_base=d['available-full'],
1358 percentage_base=d['available-full'],
1161 percentage_key=b"native",
1359 percentage_key=b"native",
1162 percentage_ref=d['available-full'],
1360 percentage_ref=d['available-full'],
1163 )
1361 )
1164 for k, __, v in all_info:
1362 for k, __, v in all_info:
1165 if v['available-full']:
1363 if v['available-full']:
1166 _dbg_bdl_line(
1364 _dbg_bdl_line(
1167 ui,
1365 ui,
1168 3,
1366 3,
1169 k,
1367 k,
1170 v['delta-full'],
1368 v['delta-full'],
1171 percentage_base=v['available-full'],
1369 percentage_base=v['available-full'],
1172 percentage_key=b"native",
1370 percentage_key=b"native",
1173 percentage_ref=v['available-full'],
1371 percentage_ref=v['available-full'],
1174 )
1372 )
1175
1373
1176 if d['delta-against-prev']:
1374 if d['delta-against-prev']:
1177 _dbg_bdl_line(ui, 2, b'previous', d['delta-against-prev'])
1375 _dbg_bdl_line(ui, 2, b'previous', d['delta-against-prev'])
1178 for k, __, v in all_info:
1376 for k, __, v in all_info:
1179 if v['delta-against-prev']:
1377 if v['delta-against-prev']:
1180 _dbg_bdl_line(ui, 3, k, v['delta-against-prev'])
1378 _dbg_bdl_line(ui, 3, k, v['delta-against-prev'])
1181
1379
1182 if d['delta-against-p1']:
1380 if d['delta-against-p1']:
1183 _dbg_bdl_line(ui, 2, b'parent-1', d['delta-against-p1'])
1381 _dbg_bdl_line(ui, 2, b'parent-1', d['delta-against-p1'])
1184 for k, __, v in all_info:
1382 for k, __, v in all_info:
1185 if v['delta-against-p1']:
1383 if v['delta-against-p1']:
1186 _dbg_bdl_line(ui, 3, k, v['delta-against-p1'])
1384 _dbg_bdl_line(ui, 3, k, v['delta-against-p1'])
1187
1385
1188
1386
1189 class cgpacker:
1387 class cgpacker:
1190 def __init__(
1388 def __init__(
1191 self,
1389 self,
1192 repo,
1390 repo,
1193 oldmatcher,
1391 oldmatcher,
1194 matcher,
1392 matcher,
1195 version,
1393 version,
1196 builddeltaheader,
1394 builddeltaheader,
1197 manifestsend,
1395 manifestsend,
1198 forcedeltaparentprev=False,
1396 forcedeltaparentprev=False,
1199 bundlecaps=None,
1397 bundlecaps=None,
1200 ellipses=False,
1398 ellipses=False,
1201 shallow=False,
1399 shallow=False,
1202 ellipsisroots=None,
1400 ellipsisroots=None,
1203 fullnodes=None,
1401 fullnodes=None,
1204 remote_sidedata=None,
1402 remote_sidedata=None,
1205 ):
1403 ):
1206 """Given a source repo, construct a bundler.
1404 """Given a source repo, construct a bundler.
1207
1405
1208 oldmatcher is a matcher that matches on files the client already has.
1406 oldmatcher is a matcher that matches on files the client already has.
1209 These will not be included in the changegroup.
1407 These will not be included in the changegroup.
1210
1408
1211 matcher is a matcher that matches on files to include in the
1409 matcher is a matcher that matches on files to include in the
1212 changegroup. Used to facilitate sparse changegroups.
1410 changegroup. Used to facilitate sparse changegroups.
1213
1411
1214 forcedeltaparentprev indicates whether delta parents must be against
1412 forcedeltaparentprev indicates whether delta parents must be against
1215 the previous revision in a delta group. This should only be used for
1413 the previous revision in a delta group. This should only be used for
1216 compatibility with changegroup version 1.
1414 compatibility with changegroup version 1.
1217
1415
1218 builddeltaheader is a callable that constructs the header for a group
1416 builddeltaheader is a callable that constructs the header for a group
1219 delta.
1417 delta.
1220
1418
1221 manifestsend is a chunk to send after manifests have been fully emitted.
1419 manifestsend is a chunk to send after manifests have been fully emitted.
1222
1420
1223 ellipses indicates whether ellipsis serving mode is enabled.
1421 ellipses indicates whether ellipsis serving mode is enabled.
1224
1422
1225 bundlecaps is optional and can be used to specify the set of
1423 bundlecaps is optional and can be used to specify the set of
1226 capabilities which can be used to build the bundle. While bundlecaps is
1424 capabilities which can be used to build the bundle. While bundlecaps is
1227 unused in core Mercurial, extensions rely on this feature to communicate
1425 unused in core Mercurial, extensions rely on this feature to communicate
1228 capabilities to customize the changegroup packer.
1426 capabilities to customize the changegroup packer.
1229
1427
1230 shallow indicates whether shallow data might be sent. The packer may
1428 shallow indicates whether shallow data might be sent. The packer may
1231 need to pack file contents not introduced by the changes being packed.
1429 need to pack file contents not introduced by the changes being packed.
1232
1430
1233 fullnodes is the set of changelog nodes which should not be ellipsis
1431 fullnodes is the set of changelog nodes which should not be ellipsis
1234 nodes. We store this rather than the set of nodes that should be
1432 nodes. We store this rather than the set of nodes that should be
1235 ellipsis because for very large histories we expect this to be
1433 ellipsis because for very large histories we expect this to be
1236 significantly smaller.
1434 significantly smaller.
1237
1435
1238 remote_sidedata is the set of sidedata categories wanted by the remote.
1436 remote_sidedata is the set of sidedata categories wanted by the remote.
1239 """
1437 """
1240 assert oldmatcher
1438 assert oldmatcher
1241 assert matcher
1439 assert matcher
1242 self._oldmatcher = oldmatcher
1440 self._oldmatcher = oldmatcher
1243 self._matcher = matcher
1441 self._matcher = matcher
1244
1442
1245 self.version = version
1443 self.version = version
1246 self._forcedeltaparentprev = forcedeltaparentprev
1444 self._forcedeltaparentprev = forcedeltaparentprev
1247 self._builddeltaheader = builddeltaheader
1445 self._builddeltaheader = builddeltaheader
1248 self._manifestsend = manifestsend
1446 self._manifestsend = manifestsend
1249 self._ellipses = ellipses
1447 self._ellipses = ellipses
1250
1448
1251 # Set of capabilities we can use to build the bundle.
1449 # Set of capabilities we can use to build the bundle.
1252 if bundlecaps is None:
1450 if bundlecaps is None:
1253 bundlecaps = set()
1451 bundlecaps = set()
1254 self._bundlecaps = bundlecaps
1452 self._bundlecaps = bundlecaps
1255 if remote_sidedata is None:
1453 if remote_sidedata is None:
1256 remote_sidedata = set()
1454 remote_sidedata = set()
1257 self._remote_sidedata = remote_sidedata
1455 self._remote_sidedata = remote_sidedata
1258 self._isshallow = shallow
1456 self._isshallow = shallow
1259 self._fullclnodes = fullnodes
1457 self._fullclnodes = fullnodes
1260
1458
1261 # Maps ellipsis revs to their roots at the changelog level.
1459 # Maps ellipsis revs to their roots at the changelog level.
1262 self._precomputedellipsis = ellipsisroots
1460 self._precomputedellipsis = ellipsisroots
1263
1461
1264 self._repo = repo
1462 self._repo = repo
1265
1463
1266 if self._repo.ui.verbose and not self._repo.ui.debugflag:
1464 if self._repo.ui.verbose and not self._repo.ui.debugflag:
1267 self._verbosenote = self._repo.ui.note
1465 self._verbosenote = self._repo.ui.note
1268 else:
1466 else:
1269 self._verbosenote = lambda s: None
1467 self._verbosenote = lambda s: None
1270
1468
1271 def generate(
1469 def generate(
1272 self,
1470 self,
1273 commonrevs,
1471 commonrevs,
1274 clnodes,
1472 clnodes,
1275 fastpathlinkrev,
1473 fastpathlinkrev,
1276 source,
1474 source,
1277 changelog=True,
1475 changelog=True,
1278 ):
1476 ):
1279 """Yield a sequence of changegroup byte chunks.
1477 """Yield a sequence of changegroup byte chunks.
1280 If changelog is False, changelog data won't be added to the changegroup.
1478 If changelog is False, changelog data won't be added to the changegroup.
1281 """
1479 """
1282
1480
1283 debug_info = None
1481 debug_info = None
1284 repo = self._repo
1482 repo = self._repo
1285 if repo.ui.configbool(b'debug', b'bundling-stats'):
1483 if repo.ui.configbool(b'debug', b'bundling-stats'):
1286 debug_info = make_debug_info()
1484 debug_info = make_debug_info()
1287 cl = repo.changelog
1485 cl = repo.changelog
1288
1486
1289 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
1487 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
1290 size = 0
1488 size = 0
1291
1489
1292 sidedata_helpers = None
1490 sidedata_helpers = None
1293 if self.version == b'04':
1491 if self.version == b'04':
1294 remote_sidedata = self._remote_sidedata
1492 remote_sidedata = self._remote_sidedata
1295 if source == b'strip':
1493 if source == b'strip':
1296 # We're our own remote when stripping, get the no-op helpers
1494 # We're our own remote when stripping, get the no-op helpers
1297 # TODO a better approach would be for the strip bundle to
1495 # TODO a better approach would be for the strip bundle to
1298 # correctly advertise its sidedata categories directly.
1496 # correctly advertise its sidedata categories directly.
1299 remote_sidedata = repo._wanted_sidedata
1497 remote_sidedata = repo._wanted_sidedata
1300 sidedata_helpers = sidedatamod.get_sidedata_helpers(
1498 sidedata_helpers = sidedatamod.get_sidedata_helpers(
1301 repo,
1499 repo,
1302 remote_sidedata,
1500 remote_sidedata,
1303 )
1501 )
1304
1502
1305 cl_debug_info = None
1503 cl_debug_info = None
1306 if debug_info is not None:
1504 if debug_info is not None:
1307 cl_debug_info = make_debug_info()
1505 cl_debug_info = make_debug_info()
1308 clstate, deltas = self._generatechangelog(
1506 clstate, deltas = self._generatechangelog(
1309 cl,
1507 cl,
1310 clnodes,
1508 clnodes,
1311 generate=changelog,
1509 generate=changelog,
1312 sidedata_helpers=sidedata_helpers,
1510 sidedata_helpers=sidedata_helpers,
1313 debug_info=cl_debug_info,
1511 debug_info=cl_debug_info,
1314 )
1512 )
1315 for delta in deltas:
1513 for delta in deltas:
1316 for chunk in _revisiondeltatochunks(
1514 for chunk in _revisiondeltatochunks(
1317 self._repo, delta, self._builddeltaheader
1515 self._repo, delta, self._builddeltaheader
1318 ):
1516 ):
1319 size += len(chunk)
1517 size += len(chunk)
1320 yield chunk
1518 yield chunk
1321
1519
1322 close = closechunk()
1520 close = closechunk()
1323 size += len(close)
1521 size += len(close)
1324 yield close
1522 yield close
1325 if debug_info is not None:
1523 if debug_info is not None:
1326 merge_debug_info(debug_info, cl_debug_info)
1524 merge_debug_info(debug_info, cl_debug_info)
1327 debug_info['revision-changelog'] = cl_debug_info['revision-total']
1525 debug_info['revision-changelog'] = cl_debug_info['revision-total']
1328
1526
1329 self._verbosenote(_(b'%8.i (changelog)\n') % size)
1527 self._verbosenote(_(b'%8.i (changelog)\n') % size)
1330
1528
1331 clrevorder = clstate[b'clrevorder']
1529 clrevorder = clstate[b'clrevorder']
1332 manifests = clstate[b'manifests']
1530 manifests = clstate[b'manifests']
1333 changedfiles = clstate[b'changedfiles']
1531 changedfiles = clstate[b'changedfiles']
1334
1532
1335 if debug_info is not None:
1533 if debug_info is not None:
1336 debug_info['file-count'] = len(changedfiles)
1534 debug_info['file-count'] = len(changedfiles)
1337
1535
1338 # We need to make sure that the linkrev in the changegroup refers to
1536 # We need to make sure that the linkrev in the changegroup refers to
1339 # the first changeset that introduced the manifest or file revision.
1537 # the first changeset that introduced the manifest or file revision.
1340 # The fastpath is usually safer than the slowpath, because the filelogs
1538 # The fastpath is usually safer than the slowpath, because the filelogs
1341 # are walked in revlog order.
1539 # are walked in revlog order.
1342 #
1540 #
1343 # When taking the slowpath when the manifest revlog uses generaldelta,
1541 # When taking the slowpath when the manifest revlog uses generaldelta,
1344 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
1542 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
1345 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
1543 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
1346 #
1544 #
1347 # When taking the fastpath, we are only vulnerable to reordering
1545 # When taking the fastpath, we are only vulnerable to reordering
1348 # of the changelog itself. The changelog never uses generaldelta and is
1546 # of the changelog itself. The changelog never uses generaldelta and is
1349 # never reordered. To handle this case, we simply take the slowpath,
1547 # never reordered. To handle this case, we simply take the slowpath,
1350 # which already has the 'clrevorder' logic. This was also fixed in
1548 # which already has the 'clrevorder' logic. This was also fixed in
1351 # cc0ff93d0c0c.
1549 # cc0ff93d0c0c.
1352
1550
1353 # Treemanifests don't work correctly with fastpathlinkrev
1551 # Treemanifests don't work correctly with fastpathlinkrev
1354 # either, because we don't discover which directory nodes to
1552 # either, because we don't discover which directory nodes to
1355 # send along with files. This could probably be fixed.
1553 # send along with files. This could probably be fixed.
1356 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
1554 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
1357
1555
1358 fnodes = {} # needed file nodes
1556 fnodes = {} # needed file nodes
1359
1557
1360 size = 0
1558 size = 0
1361 mn_debug_info = None
1559 mn_debug_info = None
1362 if debug_info is not None:
1560 if debug_info is not None:
1363 mn_debug_info = make_debug_info()
1561 mn_debug_info = make_debug_info()
1364 it = self.generatemanifests(
1562 it = self.generatemanifests(
1365 commonrevs,
1563 commonrevs,
1366 clrevorder,
1564 clrevorder,
1367 fastpathlinkrev,
1565 fastpathlinkrev,
1368 manifests,
1566 manifests,
1369 fnodes,
1567 fnodes,
1370 source,
1568 source,
1371 clstate[b'clrevtomanifestrev'],
1569 clstate[b'clrevtomanifestrev'],
1372 sidedata_helpers=sidedata_helpers,
1570 sidedata_helpers=sidedata_helpers,
1373 debug_info=mn_debug_info,
1571 debug_info=mn_debug_info,
1374 )
1572 )
1375
1573
1376 for tree, deltas in it:
1574 for tree, deltas in it:
1377 if tree:
1575 if tree:
1378 assert self.version in (b'03', b'04')
1576 assert self.version in (b'03', b'04')
1379 chunk = _fileheader(tree)
1577 chunk = _fileheader(tree)
1380 size += len(chunk)
1578 size += len(chunk)
1381 yield chunk
1579 yield chunk
1382
1580
1383 for delta in deltas:
1581 for delta in deltas:
1384 chunks = _revisiondeltatochunks(
1582 chunks = _revisiondeltatochunks(
1385 self._repo, delta, self._builddeltaheader
1583 self._repo, delta, self._builddeltaheader
1386 )
1584 )
1387 for chunk in chunks:
1585 for chunk in chunks:
1388 size += len(chunk)
1586 size += len(chunk)
1389 yield chunk
1587 yield chunk
1390
1588
1391 close = closechunk()
1589 close = closechunk()
1392 size += len(close)
1590 size += len(close)
1393 yield close
1591 yield close
1394 if debug_info is not None:
1592 if debug_info is not None:
1395 merge_debug_info(debug_info, mn_debug_info)
1593 merge_debug_info(debug_info, mn_debug_info)
1396 debug_info['revision-manifest'] = mn_debug_info['revision-total']
1594 debug_info['revision-manifest'] = mn_debug_info['revision-total']
1397
1595
1398 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1596 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1399 yield self._manifestsend
1597 yield self._manifestsend
1400
1598
1401 mfdicts = None
1599 mfdicts = None
1402 if self._ellipses and self._isshallow:
1600 if self._ellipses and self._isshallow:
1403 mfdicts = [
1601 mfdicts = [
1404 (repo.manifestlog[n].read(), lr)
1602 (repo.manifestlog[n].read(), lr)
1405 for (n, lr) in pycompat.iteritems(manifests)
1603 for (n, lr) in pycompat.iteritems(manifests)
1406 ]
1604 ]
1407
1605
1408 manifests.clear()
1606 manifests.clear()
1409 clrevs = {cl.rev(x) for x in clnodes}
1607 clrevs = {cl.rev(x) for x in clnodes}
1410
1608
1411 fl_debug_info = None
1609 fl_debug_info = None
1412 if debug_info is not None:
1610 if debug_info is not None:
1413 fl_debug_info = make_debug_info()
1611 fl_debug_info = make_debug_info()
1414 it = self.generatefiles(
1612 it = self.generatefiles(
1415 changedfiles,
1613 changedfiles,
1416 commonrevs,
1614 commonrevs,
1417 source,
1615 source,
1418 mfdicts,
1616 mfdicts,
1419 fastpathlinkrev,
1617 fastpathlinkrev,
1420 fnodes,
1618 fnodes,
1421 clrevs,
1619 clrevs,
1422 sidedata_helpers=sidedata_helpers,
1620 sidedata_helpers=sidedata_helpers,
1423 debug_info=fl_debug_info,
1621 debug_info=fl_debug_info,
1424 )
1622 )
1425
1623
1426 for path, deltas in it:
1624 for path, deltas in it:
1427 h = _fileheader(path)
1625 h = _fileheader(path)
1428 size = len(h)
1626 size = len(h)
1429 yield h
1627 yield h
1430
1628
1431 for delta in deltas:
1629 for delta in deltas:
1432 chunks = _revisiondeltatochunks(
1630 chunks = _revisiondeltatochunks(
1433 self._repo, delta, self._builddeltaheader
1631 self._repo, delta, self._builddeltaheader
1434 )
1632 )
1435 for chunk in chunks:
1633 for chunk in chunks:
1436 size += len(chunk)
1634 size += len(chunk)
1437 yield chunk
1635 yield chunk
1438
1636
1439 close = closechunk()
1637 close = closechunk()
1440 size += len(close)
1638 size += len(close)
1441 yield close
1639 yield close
1442
1640
1443 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1641 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1444
1642
1445 yield closechunk()
1643 yield closechunk()
1446 if debug_info is not None:
1644 if debug_info is not None:
1447 merge_debug_info(debug_info, fl_debug_info)
1645 merge_debug_info(debug_info, fl_debug_info)
1448 debug_info['revision-files'] = fl_debug_info['revision-total']
1646 debug_info['revision-files'] = fl_debug_info['revision-total']
1449
1647
1450 if debug_info is not None:
1648 if debug_info is not None:
1451 display_bundling_debug_info(
1649 display_bundling_debug_info(
1452 repo.ui,
1650 repo.ui,
1453 debug_info,
1651 debug_info,
1454 cl_debug_info,
1652 cl_debug_info,
1455 mn_debug_info,
1653 mn_debug_info,
1456 fl_debug_info,
1654 fl_debug_info,
1457 )
1655 )
1458
1656
1459 if clnodes:
1657 if clnodes:
1460 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1658 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1461
1659
1462 def _generatechangelog(
1660 def _generatechangelog(
1463 self,
1661 self,
1464 cl,
1662 cl,
1465 nodes,
1663 nodes,
1466 generate=True,
1664 generate=True,
1467 sidedata_helpers=None,
1665 sidedata_helpers=None,
1468 debug_info=None,
1666 debug_info=None,
1469 ):
1667 ):
1470 """Generate data for changelog chunks.
1668 """Generate data for changelog chunks.
1471
1669
1472 Returns a 2-tuple of a dict containing state and an iterable of
1670 Returns a 2-tuple of a dict containing state and an iterable of
1473 byte chunks. The state will not be fully populated until the
1671 byte chunks. The state will not be fully populated until the
1474 chunk stream has been fully consumed.
1672 chunk stream has been fully consumed.
1475
1673
1476 If generate is False, the state will be fully populated and no chunk
1674 If generate is False, the state will be fully populated and no chunk
1477 stream will be yielded.
1675 stream will be yielded.
1478
1676
1479 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
1677 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
1480 `sidedata_helpers`.
1678 `sidedata_helpers`.
1481 """
1679 """
1482 clrevorder = {}
1680 clrevorder = {}
1483 manifests = {}
1681 manifests = {}
1484 mfl = self._repo.manifestlog
1682 mfl = self._repo.manifestlog
1485 changedfiles = set()
1683 changedfiles = set()
1486 clrevtomanifestrev = {}
1684 clrevtomanifestrev = {}
1487
1685
1488 state = {
1686 state = {
1489 b'clrevorder': clrevorder,
1687 b'clrevorder': clrevorder,
1490 b'manifests': manifests,
1688 b'manifests': manifests,
1491 b'changedfiles': changedfiles,
1689 b'changedfiles': changedfiles,
1492 b'clrevtomanifestrev': clrevtomanifestrev,
1690 b'clrevtomanifestrev': clrevtomanifestrev,
1493 }
1691 }
1494
1692
1495 if not (generate or self._ellipses):
1693 if not (generate or self._ellipses):
1496 # sort the nodes in storage order
1694 # sort the nodes in storage order
1497 nodes = sorted(nodes, key=cl.rev)
1695 nodes = sorted(nodes, key=cl.rev)
1498 for node in nodes:
1696 for node in nodes:
1499 c = cl.changelogrevision(node)
1697 c = cl.changelogrevision(node)
1500 clrevorder[node] = len(clrevorder)
1698 clrevorder[node] = len(clrevorder)
1501 # record the first changeset introducing this manifest version
1699 # record the first changeset introducing this manifest version
1502 manifests.setdefault(c.manifest, node)
1700 manifests.setdefault(c.manifest, node)
1503 # Record a complete list of potentially-changed files in
1701 # Record a complete list of potentially-changed files in
1504 # this manifest.
1702 # this manifest.
1505 changedfiles.update(c.files)
1703 changedfiles.update(c.files)
1506
1704
1507 return state, ()
1705 return state, ()
1508
1706
1509 # Callback for the changelog, used to collect changed files and
1707 # Callback for the changelog, used to collect changed files and
1510 # manifest nodes.
1708 # manifest nodes.
1511 # Returns the linkrev node (identity in the changelog case).
1709 # Returns the linkrev node (identity in the changelog case).
1512 def lookupcl(x):
1710 def lookupcl(x):
1513 c = cl.changelogrevision(x)
1711 c = cl.changelogrevision(x)
1514 clrevorder[x] = len(clrevorder)
1712 clrevorder[x] = len(clrevorder)
1515
1713
1516 if self._ellipses:
1714 if self._ellipses:
1517 # Only update manifests if x is going to be sent. Otherwise we
1715 # Only update manifests if x is going to be sent. Otherwise we
1518 # end up with bogus linkrevs specified for manifests and
1716 # end up with bogus linkrevs specified for manifests and
1519 # we skip some manifest nodes that we should otherwise
1717 # we skip some manifest nodes that we should otherwise
1520 # have sent.
1718 # have sent.
1521 if (
1719 if (
1522 x in self._fullclnodes
1720 x in self._fullclnodes
1523 or cl.rev(x) in self._precomputedellipsis
1721 or cl.rev(x) in self._precomputedellipsis
1524 ):
1722 ):
1525
1723
1526 manifestnode = c.manifest
1724 manifestnode = c.manifest
1527 # Record the first changeset introducing this manifest
1725 # Record the first changeset introducing this manifest
1528 # version.
1726 # version.
1529 manifests.setdefault(manifestnode, x)
1727 manifests.setdefault(manifestnode, x)
1530 # Set this narrow-specific dict so we have the lowest
1728 # Set this narrow-specific dict so we have the lowest
1531 # manifest revnum to look up for this cl revnum. (Part of
1729 # manifest revnum to look up for this cl revnum. (Part of
1532 # mapping changelog ellipsis parents to manifest ellipsis
1730 # mapping changelog ellipsis parents to manifest ellipsis
1533 # parents)
1731 # parents)
1534 clrevtomanifestrev.setdefault(
1732 clrevtomanifestrev.setdefault(
1535 cl.rev(x), mfl.rev(manifestnode)
1733 cl.rev(x), mfl.rev(manifestnode)
1536 )
1734 )
1537 # We can't trust the changed files list in the changeset if the
1735 # We can't trust the changed files list in the changeset if the
1538 # client requested a shallow clone.
1736 # client requested a shallow clone.
1539 if self._isshallow:
1737 if self._isshallow:
1540 changedfiles.update(mfl[c.manifest].read().keys())
1738 changedfiles.update(mfl[c.manifest].read().keys())
1541 else:
1739 else:
1542 changedfiles.update(c.files)
1740 changedfiles.update(c.files)
1543 else:
1741 else:
1544 # record the first changeset introducing this manifest version
1742 # record the first changeset introducing this manifest version
1545 manifests.setdefault(c.manifest, x)
1743 manifests.setdefault(c.manifest, x)
1546 # Record a complete list of potentially-changed files in
1744 # Record a complete list of potentially-changed files in
1547 # this manifest.
1745 # this manifest.
1548 changedfiles.update(c.files)
1746 changedfiles.update(c.files)
1549
1747
1550 return x
1748 return x
1551
1749
1552 gen = deltagroup(
1750 gen = deltagroup(
1553 self._repo,
1751 self._repo,
1554 cl,
1752 cl,
1555 nodes,
1753 nodes,
1556 True,
1754 True,
1557 lookupcl,
1755 lookupcl,
1558 self._forcedeltaparentprev,
1756 self._forcedeltaparentprev,
1559 ellipses=self._ellipses,
1757 ellipses=self._ellipses,
1560 topic=_(b'changesets'),
1758 topic=_(b'changesets'),
1561 clrevtolocalrev={},
1759 clrevtolocalrev={},
1562 fullclnodes=self._fullclnodes,
1760 fullclnodes=self._fullclnodes,
1563 precomputedellipsis=self._precomputedellipsis,
1761 precomputedellipsis=self._precomputedellipsis,
1564 sidedata_helpers=sidedata_helpers,
1762 sidedata_helpers=sidedata_helpers,
1565 debug_info=debug_info,
1763 debug_info=debug_info,
1566 )
1764 )
1567
1765
1568 return state, gen
1766 return state, gen
1569
1767
1570 def generatemanifests(
1768 def generatemanifests(
1571 self,
1769 self,
1572 commonrevs,
1770 commonrevs,
1573 clrevorder,
1771 clrevorder,
1574 fastpathlinkrev,
1772 fastpathlinkrev,
1575 manifests,
1773 manifests,
1576 fnodes,
1774 fnodes,
1577 source,
1775 source,
1578 clrevtolocalrev,
1776 clrevtolocalrev,
1579 sidedata_helpers=None,
1777 sidedata_helpers=None,
1580 debug_info=None,
1778 debug_info=None,
1581 ):
1779 ):
1582 """Returns an iterator of changegroup chunks containing manifests.
1780 """Returns an iterator of changegroup chunks containing manifests.
1583
1781
1584 `source` is unused here, but is used by extensions like remotefilelog to
1782 `source` is unused here, but is used by extensions like remotefilelog to
1585 change what is sent based on pulls vs pushes, etc.
1783 change what is sent based on pulls vs pushes, etc.
1586
1784
1587 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
1785 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
1588 `sidedata_helpers`.
1786 `sidedata_helpers`.
1589 """
1787 """
1590 repo = self._repo
1788 repo = self._repo
1591 mfl = repo.manifestlog
1789 mfl = repo.manifestlog
1592 tmfnodes = {b'': manifests}
1790 tmfnodes = {b'': manifests}
1593
1791
1594 # Callback for the manifest, used to collect linkrevs for filelog
1792 # Callback for the manifest, used to collect linkrevs for filelog
1595 # revisions.
1793 # revisions.
1596 # Returns the linkrev node (collected in lookupcl).
1794 # Returns the linkrev node (collected in lookupcl).
1597 def makelookupmflinknode(tree, nodes):
1795 def makelookupmflinknode(tree, nodes):
1598 if fastpathlinkrev:
1796 if fastpathlinkrev:
1599 assert not tree
1797 assert not tree
1600
1798
1601 # pytype: disable=unsupported-operands
1799 # pytype: disable=unsupported-operands
1602 return manifests.__getitem__
1800 return manifests.__getitem__
1603 # pytype: enable=unsupported-operands
1801 # pytype: enable=unsupported-operands
1604
1802
1605 def lookupmflinknode(x):
1803 def lookupmflinknode(x):
1606 """Callback for looking up the linknode for manifests.
1804 """Callback for looking up the linknode for manifests.
1607
1805
1608 Returns the linkrev node for the specified manifest.
1806 Returns the linkrev node for the specified manifest.
1609
1807
1610 SIDE EFFECT:
1808 SIDE EFFECT:
1611
1809
1612 1) fclnodes gets populated with the list of relevant
1810 1) fclnodes gets populated with the list of relevant
1613 file nodes if we're not using fastpathlinkrev
1811 file nodes if we're not using fastpathlinkrev
1614 2) When treemanifests are in use, collects treemanifest nodes
1812 2) When treemanifests are in use, collects treemanifest nodes
1615 to send
1813 to send
1616
1814
1617 Note that this means manifests must be completely sent to
1815 Note that this means manifests must be completely sent to
1618 the client before you can trust the list of files and
1816 the client before you can trust the list of files and
1619 treemanifests to send.
1817 treemanifests to send.
1620 """
1818 """
1621 clnode = nodes[x]
1819 clnode = nodes[x]
1622 mdata = mfl.get(tree, x).readfast(shallow=True)
1820 mdata = mfl.get(tree, x).readfast(shallow=True)
1623 for p, n, fl in mdata.iterentries():
1821 for p, n, fl in mdata.iterentries():
1624 if fl == b't': # subdirectory manifest
1822 if fl == b't': # subdirectory manifest
1625 subtree = tree + p + b'/'
1823 subtree = tree + p + b'/'
1626 tmfclnodes = tmfnodes.setdefault(subtree, {})
1824 tmfclnodes = tmfnodes.setdefault(subtree, {})
1627 tmfclnode = tmfclnodes.setdefault(n, clnode)
1825 tmfclnode = tmfclnodes.setdefault(n, clnode)
1628 if clrevorder[clnode] < clrevorder[tmfclnode]:
1826 if clrevorder[clnode] < clrevorder[tmfclnode]:
1629 tmfclnodes[n] = clnode
1827 tmfclnodes[n] = clnode
1630 else:
1828 else:
1631 f = tree + p
1829 f = tree + p
1632 fclnodes = fnodes.setdefault(f, {})
1830 fclnodes = fnodes.setdefault(f, {})
1633 fclnode = fclnodes.setdefault(n, clnode)
1831 fclnode = fclnodes.setdefault(n, clnode)
1634 if clrevorder[clnode] < clrevorder[fclnode]:
1832 if clrevorder[clnode] < clrevorder[fclnode]:
1635 fclnodes[n] = clnode
1833 fclnodes[n] = clnode
1636 return clnode
1834 return clnode
1637
1835
1638 return lookupmflinknode
1836 return lookupmflinknode
1639
1837
1640 while tmfnodes:
1838 while tmfnodes:
1641 tree, nodes = tmfnodes.popitem()
1839 tree, nodes = tmfnodes.popitem()
1642
1840
1643 should_visit = self._matcher.visitdir(tree[:-1])
1841 should_visit = self._matcher.visitdir(tree[:-1])
1644 if tree and not should_visit:
1842 if tree and not should_visit:
1645 continue
1843 continue
1646
1844
1647 store = mfl.getstorage(tree)
1845 store = mfl.getstorage(tree)
1648
1846
1649 if not should_visit:
1847 if not should_visit:
1650 # No nodes to send because this directory is out of
1848 # No nodes to send because this directory is out of
1651 # the client's view of the repository (probably
1849 # the client's view of the repository (probably
1652 # because of narrow clones). Do this even for the root
1850 # because of narrow clones). Do this even for the root
1653 # directory (tree=='')
1851 # directory (tree=='')
1654 prunednodes = []
1852 prunednodes = []
1655 else:
1853 else:
1656 # Avoid sending any manifest nodes we can prove the
1854 # Avoid sending any manifest nodes we can prove the
1657 # client already has by checking linkrevs. See the
1855 # client already has by checking linkrevs. See the
1658 # related comment in generatefiles().
1856 # related comment in generatefiles().
1659 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1857 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1660
1858
1661 if tree and not prunednodes:
1859 if tree and not prunednodes:
1662 continue
1860 continue
1663
1861
1664 lookupfn = makelookupmflinknode(tree, nodes)
1862 lookupfn = makelookupmflinknode(tree, nodes)
1665
1863
1666 deltas = deltagroup(
1864 deltas = deltagroup(
1667 self._repo,
1865 self._repo,
1668 store,
1866 store,
1669 prunednodes,
1867 prunednodes,
1670 False,
1868 False,
1671 lookupfn,
1869 lookupfn,
1672 self._forcedeltaparentprev,
1870 self._forcedeltaparentprev,
1673 ellipses=self._ellipses,
1871 ellipses=self._ellipses,
1674 topic=_(b'manifests'),
1872 topic=_(b'manifests'),
1675 clrevtolocalrev=clrevtolocalrev,
1873 clrevtolocalrev=clrevtolocalrev,
1676 fullclnodes=self._fullclnodes,
1874 fullclnodes=self._fullclnodes,
1677 precomputedellipsis=self._precomputedellipsis,
1875 precomputedellipsis=self._precomputedellipsis,
1678 sidedata_helpers=sidedata_helpers,
1876 sidedata_helpers=sidedata_helpers,
1679 debug_info=debug_info,
1877 debug_info=debug_info,
1680 )
1878 )
1681
1879
1682 if not self._oldmatcher.visitdir(store.tree[:-1]):
1880 if not self._oldmatcher.visitdir(store.tree[:-1]):
1683 yield tree, deltas
1881 yield tree, deltas
1684 else:
1882 else:
1685 # 'deltas' is a generator and we need to consume it even if
1883 # 'deltas' is a generator and we need to consume it even if
1686 # we are not going to send it because a side-effect is that
1884 # we are not going to send it because a side-effect is that
1687 # it updates tmfnodes (via lookupfn)
1885 # it updates tmfnodes (via lookupfn)
1688 for d in deltas:
1886 for d in deltas:
1689 pass
1887 pass
1690 if not tree:
1888 if not tree:
1691 yield tree, []
1889 yield tree, []
1692
1890
1693 def _prunemanifests(self, store, nodes, commonrevs):
1891 def _prunemanifests(self, store, nodes, commonrevs):
1694 if not self._ellipses:
1892 if not self._ellipses:
1695 # In the non-ellipses case, for large repositories it is better to
1893 # In the non-ellipses case, for large repositories it is better to
1696 # send some extra data than to call store.rev and store.linkrev
1894 # send some extra data than to call store.rev and store.linkrev
1697 # on a lot of nodes
1895 # on a lot of nodes
1698 return nodes.copy()
1896 return nodes.copy()
1699 # This is split out as a separate method to allow filtering
1897 # This is split out as a separate method to allow filtering
1700 # commonrevs in extension code.
1898 # commonrevs in extension code.
1701 #
1899 #
1702 # TODO(augie): this shouldn't be required, instead we should
1900 # TODO(augie): this shouldn't be required, instead we should
1703 # make filtering of revisions to send delegated to the store
1901 # make filtering of revisions to send delegated to the store
1704 # layer.
1902 # layer.
1705 frev, flr = store.rev, store.linkrev
1903 frev, flr = store.rev, store.linkrev
1706 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1904 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1707
1905
1708 # The 'source' parameter is useful for extensions
1906 # The 'source' parameter is useful for extensions
1709 def generatefiles(
1907 def generatefiles(
1710 self,
1908 self,
1711 changedfiles,
1909 changedfiles,
1712 commonrevs,
1910 commonrevs,
1713 source,
1911 source,
1714 mfdicts,
1912 mfdicts,
1715 fastpathlinkrev,
1913 fastpathlinkrev,
1716 fnodes,
1914 fnodes,
1717 clrevs,
1915 clrevs,
1718 sidedata_helpers=None,
1916 sidedata_helpers=None,
1719 debug_info=None,
1917 debug_info=None,
1720 ):
1918 ):
1721 changedfiles = [
1919 changedfiles = [
1722 f
1920 f
1723 for f in changedfiles
1921 for f in changedfiles
1724 if self._matcher(f) and not self._oldmatcher(f)
1922 if self._matcher(f) and not self._oldmatcher(f)
1725 ]
1923 ]
1726
1924
1727 if not fastpathlinkrev:
1925 if not fastpathlinkrev:
1728
1926
1729 def normallinknodes(unused, fname):
1927 def normallinknodes(unused, fname):
1730 return fnodes.get(fname, {})
1928 return fnodes.get(fname, {})
1731
1929
1732 else:
1930 else:
1733 cln = self._repo.changelog.node
1931 cln = self._repo.changelog.node
1734
1932
1735 def normallinknodes(store, fname):
1933 def normallinknodes(store, fname):
1736 flinkrev = store.linkrev
1934 flinkrev = store.linkrev
1737 fnode = store.node
1935 fnode = store.node
1738 revs = ((r, flinkrev(r)) for r in store)
1936 revs = ((r, flinkrev(r)) for r in store)
1739 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1937 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1740
1938
1741 clrevtolocalrev = {}
1939 clrevtolocalrev = {}
1742
1940
1743 if self._isshallow:
1941 if self._isshallow:
1744 # In a shallow clone, the linknodes callback needs to also include
1942 # In a shallow clone, the linknodes callback needs to also include
1745 # those file nodes that are in the manifests we sent but weren't
1943 # those file nodes that are in the manifests we sent but weren't
1746 # introduced by those manifests.
1944 # introduced by those manifests.
1747 commonctxs = [self._repo[c] for c in commonrevs]
1945 commonctxs = [self._repo[c] for c in commonrevs]
1748 clrev = self._repo.changelog.rev
1946 clrev = self._repo.changelog.rev
1749
1947
1750 def linknodes(flog, fname):
1948 def linknodes(flog, fname):
1751 for c in commonctxs:
1949 for c in commonctxs:
1752 try:
1950 try:
1753 fnode = c.filenode(fname)
1951 fnode = c.filenode(fname)
1754 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1952 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1755 except error.ManifestLookupError:
1953 except error.ManifestLookupError:
1756 pass
1954 pass
1757 links = normallinknodes(flog, fname)
1955 links = normallinknodes(flog, fname)
1758 if len(links) != len(mfdicts):
1956 if len(links) != len(mfdicts):
1759 for mf, lr in mfdicts:
1957 for mf, lr in mfdicts:
1760 fnode = mf.get(fname, None)
1958 fnode = mf.get(fname, None)
1761 if fnode in links:
1959 if fnode in links:
1762 links[fnode] = min(links[fnode], lr, key=clrev)
1960 links[fnode] = min(links[fnode], lr, key=clrev)
1763 elif fnode:
1961 elif fnode:
1764 links[fnode] = lr
1962 links[fnode] = lr
1765 return links
1963 return links
1766
1964
1767 else:
1965 else:
1768 linknodes = normallinknodes
1966 linknodes = normallinknodes
1769
1967
1770 repo = self._repo
1968 repo = self._repo
1771 progress = repo.ui.makeprogress(
1969 progress = repo.ui.makeprogress(
1772 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1970 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1773 )
1971 )
1774 for i, fname in enumerate(sorted(changedfiles)):
1972 for i, fname in enumerate(sorted(changedfiles)):
1775 filerevlog = repo.file(fname)
1973 filerevlog = repo.file(fname)
1776 if not filerevlog:
1974 if not filerevlog:
1777 raise error.Abort(
1975 raise error.Abort(
1778 _(b"empty or missing file data for %s") % fname
1976 _(b"empty or missing file data for %s") % fname
1779 )
1977 )
1780
1978
1781 clrevtolocalrev.clear()
1979 clrevtolocalrev.clear()
1782
1980
1783 linkrevnodes = linknodes(filerevlog, fname)
1981 linkrevnodes = linknodes(filerevlog, fname)
1784 # Lookup for filenodes, we collected the linkrev nodes above in the
1982 # Lookup for filenodes, we collected the linkrev nodes above in the
1785 # fastpath case and with lookupmf in the slowpath case.
1983 # fastpath case and with lookupmf in the slowpath case.
1786 def lookupfilelog(x):
1984 def lookupfilelog(x):
1787 return linkrevnodes[x]
1985 return linkrevnodes[x]
1788
1986
1789 frev, flr = filerevlog.rev, filerevlog.linkrev
1987 frev, flr = filerevlog.rev, filerevlog.linkrev
1790 # Skip sending any filenode we know the client already
1988 # Skip sending any filenode we know the client already
1791 # has. This avoids over-sending files relatively
1989 # has. This avoids over-sending files relatively
1792 # inexpensively, so it's not a problem if we under-filter
1990 # inexpensively, so it's not a problem if we under-filter
1793 # here.
1991 # here.
1794 filenodes = [
1992 filenodes = [
1795 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1993 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1796 ]
1994 ]
1797
1995
1798 if not filenodes:
1996 if not filenodes:
1799 continue
1997 continue
1800
1998
1801 progress.update(i + 1, item=fname)
1999 progress.update(i + 1, item=fname)
1802
2000
1803 deltas = deltagroup(
2001 deltas = deltagroup(
1804 self._repo,
2002 self._repo,
1805 filerevlog,
2003 filerevlog,
1806 filenodes,
2004 filenodes,
1807 False,
2005 False,
1808 lookupfilelog,
2006 lookupfilelog,
1809 self._forcedeltaparentprev,
2007 self._forcedeltaparentprev,
1810 ellipses=self._ellipses,
2008 ellipses=self._ellipses,
1811 clrevtolocalrev=clrevtolocalrev,
2009 clrevtolocalrev=clrevtolocalrev,
1812 fullclnodes=self._fullclnodes,
2010 fullclnodes=self._fullclnodes,
1813 precomputedellipsis=self._precomputedellipsis,
2011 precomputedellipsis=self._precomputedellipsis,
1814 sidedata_helpers=sidedata_helpers,
2012 sidedata_helpers=sidedata_helpers,
1815 debug_info=debug_info,
2013 debug_info=debug_info,
1816 )
2014 )
1817
2015
1818 yield fname, deltas
2016 yield fname, deltas
1819
2017
1820 progress.complete()
2018 progress.complete()
1821
2019
1822
2020
1823 def _makecg1packer(
2021 def _makecg1packer(
1824 repo,
2022 repo,
1825 oldmatcher,
2023 oldmatcher,
1826 matcher,
2024 matcher,
1827 bundlecaps,
2025 bundlecaps,
1828 ellipses=False,
2026 ellipses=False,
1829 shallow=False,
2027 shallow=False,
1830 ellipsisroots=None,
2028 ellipsisroots=None,
1831 fullnodes=None,
2029 fullnodes=None,
1832 remote_sidedata=None,
2030 remote_sidedata=None,
1833 ):
2031 ):
1834 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
2032 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1835 d.node, d.p1node, d.p2node, d.linknode
2033 d.node, d.p1node, d.p2node, d.linknode
1836 )
2034 )
1837
2035
1838 return cgpacker(
2036 return cgpacker(
1839 repo,
2037 repo,
1840 oldmatcher,
2038 oldmatcher,
1841 matcher,
2039 matcher,
1842 b'01',
2040 b'01',
1843 builddeltaheader=builddeltaheader,
2041 builddeltaheader=builddeltaheader,
1844 manifestsend=b'',
2042 manifestsend=b'',
1845 forcedeltaparentprev=True,
2043 forcedeltaparentprev=True,
1846 bundlecaps=bundlecaps,
2044 bundlecaps=bundlecaps,
1847 ellipses=ellipses,
2045 ellipses=ellipses,
1848 shallow=shallow,
2046 shallow=shallow,
1849 ellipsisroots=ellipsisroots,
2047 ellipsisroots=ellipsisroots,
1850 fullnodes=fullnodes,
2048 fullnodes=fullnodes,
1851 )
2049 )
1852
2050
1853
2051
1854 def _makecg2packer(
2052 def _makecg2packer(
1855 repo,
2053 repo,
1856 oldmatcher,
2054 oldmatcher,
1857 matcher,
2055 matcher,
1858 bundlecaps,
2056 bundlecaps,
1859 ellipses=False,
2057 ellipses=False,
1860 shallow=False,
2058 shallow=False,
1861 ellipsisroots=None,
2059 ellipsisroots=None,
1862 fullnodes=None,
2060 fullnodes=None,
1863 remote_sidedata=None,
2061 remote_sidedata=None,
1864 ):
2062 ):
1865 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
2063 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1866 d.node, d.p1node, d.p2node, d.basenode, d.linknode
2064 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1867 )
2065 )
1868
2066
1869 return cgpacker(
2067 return cgpacker(
1870 repo,
2068 repo,
1871 oldmatcher,
2069 oldmatcher,
1872 matcher,
2070 matcher,
1873 b'02',
2071 b'02',
1874 builddeltaheader=builddeltaheader,
2072 builddeltaheader=builddeltaheader,
1875 manifestsend=b'',
2073 manifestsend=b'',
1876 bundlecaps=bundlecaps,
2074 bundlecaps=bundlecaps,
1877 ellipses=ellipses,
2075 ellipses=ellipses,
1878 shallow=shallow,
2076 shallow=shallow,
1879 ellipsisroots=ellipsisroots,
2077 ellipsisroots=ellipsisroots,
1880 fullnodes=fullnodes,
2078 fullnodes=fullnodes,
1881 )
2079 )
1882
2080
1883
2081
1884 def _makecg3packer(
2082 def _makecg3packer(
1885 repo,
2083 repo,
1886 oldmatcher,
2084 oldmatcher,
1887 matcher,
2085 matcher,
1888 bundlecaps,
2086 bundlecaps,
1889 ellipses=False,
2087 ellipses=False,
1890 shallow=False,
2088 shallow=False,
1891 ellipsisroots=None,
2089 ellipsisroots=None,
1892 fullnodes=None,
2090 fullnodes=None,
1893 remote_sidedata=None,
2091 remote_sidedata=None,
1894 ):
2092 ):
1895 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
2093 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1896 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
2094 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1897 )
2095 )
1898
2096
1899 return cgpacker(
2097 return cgpacker(
1900 repo,
2098 repo,
1901 oldmatcher,
2099 oldmatcher,
1902 matcher,
2100 matcher,
1903 b'03',
2101 b'03',
1904 builddeltaheader=builddeltaheader,
2102 builddeltaheader=builddeltaheader,
1905 manifestsend=closechunk(),
2103 manifestsend=closechunk(),
1906 bundlecaps=bundlecaps,
2104 bundlecaps=bundlecaps,
1907 ellipses=ellipses,
2105 ellipses=ellipses,
1908 shallow=shallow,
2106 shallow=shallow,
1909 ellipsisroots=ellipsisroots,
2107 ellipsisroots=ellipsisroots,
1910 fullnodes=fullnodes,
2108 fullnodes=fullnodes,
1911 )
2109 )
1912
2110
1913
2111
1914 def _makecg4packer(
2112 def _makecg4packer(
1915 repo,
2113 repo,
1916 oldmatcher,
2114 oldmatcher,
1917 matcher,
2115 matcher,
1918 bundlecaps,
2116 bundlecaps,
1919 ellipses=False,
2117 ellipses=False,
1920 shallow=False,
2118 shallow=False,
1921 ellipsisroots=None,
2119 ellipsisroots=None,
1922 fullnodes=None,
2120 fullnodes=None,
1923 remote_sidedata=None,
2121 remote_sidedata=None,
1924 ):
2122 ):
1925 # Sidedata is in a separate chunk from the delta to differentiate
2123 # Sidedata is in a separate chunk from the delta to differentiate
1926 # between the "raw delta" and the sidedata.
2124 # between the "raw delta" and the sidedata.
1927 def builddeltaheader(d):
2125 def builddeltaheader(d):
1928 return _CHANGEGROUPV4_DELTA_HEADER.pack(
2126 return _CHANGEGROUPV4_DELTA_HEADER.pack(
1929 d.protocol_flags,
2127 d.protocol_flags,
1930 d.node,
2128 d.node,
1931 d.p1node,
2129 d.p1node,
1932 d.p2node,
2130 d.p2node,
1933 d.basenode,
2131 d.basenode,
1934 d.linknode,
2132 d.linknode,
1935 d.flags,
2133 d.flags,
1936 )
2134 )
1937
2135
1938 return cgpacker(
2136 return cgpacker(
1939 repo,
2137 repo,
1940 oldmatcher,
2138 oldmatcher,
1941 matcher,
2139 matcher,
1942 b'04',
2140 b'04',
1943 builddeltaheader=builddeltaheader,
2141 builddeltaheader=builddeltaheader,
1944 manifestsend=closechunk(),
2142 manifestsend=closechunk(),
1945 bundlecaps=bundlecaps,
2143 bundlecaps=bundlecaps,
1946 ellipses=ellipses,
2144 ellipses=ellipses,
1947 shallow=shallow,
2145 shallow=shallow,
1948 ellipsisroots=ellipsisroots,
2146 ellipsisroots=ellipsisroots,
1949 fullnodes=fullnodes,
2147 fullnodes=fullnodes,
1950 remote_sidedata=remote_sidedata,
2148 remote_sidedata=remote_sidedata,
1951 )
2149 )
1952
2150
1953
2151
1954 _packermap = {
2152 _packermap = {
1955 b'01': (_makecg1packer, cg1unpacker),
2153 b'01': (_makecg1packer, cg1unpacker),
1956 # cg2 adds support for exchanging generaldelta
2154 # cg2 adds support for exchanging generaldelta
1957 b'02': (_makecg2packer, cg2unpacker),
2155 b'02': (_makecg2packer, cg2unpacker),
1958 # cg3 adds support for exchanging revlog flags and treemanifests
2156 # cg3 adds support for exchanging revlog flags and treemanifests
1959 b'03': (_makecg3packer, cg3unpacker),
2157 b'03': (_makecg3packer, cg3unpacker),
1960 # cg4 adds support for exchanging sidedata
2158 # cg4 adds support for exchanging sidedata
1961 b'04': (_makecg4packer, cg4unpacker),
2159 b'04': (_makecg4packer, cg4unpacker),
1962 }
2160 }
1963
2161
1964
2162
1965 def allsupportedversions(repo):
2163 def allsupportedversions(repo):
1966 versions = set(_packermap.keys())
2164 versions = set(_packermap.keys())
1967 needv03 = False
2165 needv03 = False
1968 if (
2166 if (
1969 repo.ui.configbool(b'experimental', b'changegroup3')
2167 repo.ui.configbool(b'experimental', b'changegroup3')
1970 or repo.ui.configbool(b'experimental', b'treemanifest')
2168 or repo.ui.configbool(b'experimental', b'treemanifest')
1971 or scmutil.istreemanifest(repo)
2169 or scmutil.istreemanifest(repo)
1972 ):
2170 ):
1973 # we keep version 03 because we need it to exchange treemanifest data
2171 # we keep version 03 because we need it to exchange treemanifest data
1974 #
2172 #
1975 # we also keep versions 01 and 02, because it is possible for a repo to
2173 # we also keep versions 01 and 02, because it is possible for a repo to
1976 # contain both normal and tree manifests at the same time, so using an
2174 # contain both normal and tree manifests at the same time, so using an
1977 # older version to pull data is viable
2175 # older version to pull data is viable
1978 #
2176 #
1979 # (or even to push a subset of history)
2177 # (or even to push a subset of history)
1980 needv03 = True
2178 needv03 = True
1981 if not needv03:
2179 if not needv03:
1982 versions.discard(b'03')
2180 versions.discard(b'03')
1983 want_v4 = (
2181 want_v4 = (
1984 repo.ui.configbool(b'experimental', b'changegroup4')
2182 repo.ui.configbool(b'experimental', b'changegroup4')
1985 or requirements.REVLOGV2_REQUIREMENT in repo.requirements
2183 or requirements.REVLOGV2_REQUIREMENT in repo.requirements
1986 or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
2184 or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
1987 )
2185 )
1988 if not want_v4:
2186 if not want_v4:
1989 versions.discard(b'04')
2187 versions.discard(b'04')
1990 return versions
2188 return versions
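
A minimal, self-contained sketch of the narrowing logic above, with plain
booleans standing in for the config and requirement checks (the names are
illustrative, not Mercurial API):

    versions = {b'01', b'02', b'03', b'04'}  # all _packermap keys
    need_v03 = False  # stands in for the changegroup3/treemanifest checks
    want_v04 = False  # stands in for the changegroup4/revlogv2 checks
    if not need_v03:
        versions.discard(b'03')
    if not want_v04:
        versions.discard(b'04')
    assert versions == {b'01', b'02'}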
1991
2189
1992
2190
1993 # Changegroup versions that can be applied to the repo
2191 # Changegroup versions that can be applied to the repo
1994 def supportedincomingversions(repo):
2192 def supportedincomingversions(repo):
1995 return allsupportedversions(repo)
2193 return allsupportedversions(repo)
1996
2194
1997
2195
1998 # Changegroup versions that can be created from the repo
2196 # Changegroup versions that can be created from the repo
1999 def supportedoutgoingversions(repo):
2197 def supportedoutgoingversions(repo):
2000 versions = allsupportedversions(repo)
2198 versions = allsupportedversions(repo)
2001 if scmutil.istreemanifest(repo):
2199 if scmutil.istreemanifest(repo):
2002 # Versions 01 and 02 support only flat manifests and it's just too
2200 # Versions 01 and 02 support only flat manifests and it's just too
2003 # expensive to convert between the flat manifest and tree manifest on
2201 # expensive to convert between the flat manifest and tree manifest on
2004 # the fly. Since tree manifests are hashed differently, all of history
2202 # the fly. Since tree manifests are hashed differently, all of history
2005 # would have to be converted. Instead, we simply don't even pretend to
2203 # would have to be converted. Instead, we simply don't even pretend to
2006 # support versions 01 and 02.
2204 # support versions 01 and 02.
2007 versions.discard(b'01')
2205 versions.discard(b'01')
2008 versions.discard(b'02')
2206 versions.discard(b'02')
2009 if requirements.NARROW_REQUIREMENT in repo.requirements:
2207 if requirements.NARROW_REQUIREMENT in repo.requirements:
2010 # Versions 01 and 02 don't support revlog flags, and we need to
2208 # Versions 01 and 02 don't support revlog flags, and we need to
2011 # support that for stripping and unbundling to work.
2209 # support that for stripping and unbundling to work.
2012 versions.discard(b'01')
2210 versions.discard(b'01')
2013 versions.discard(b'02')
2211 versions.discard(b'02')
2014 if LFS_REQUIREMENT in repo.requirements:
2212 if LFS_REQUIREMENT in repo.requirements:
2015 # Versions 01 and 02 don't support revlog flags, and we need to
2213 # Versions 01 and 02 don't support revlog flags, and we need to
2016 # mark LFS entries with REVIDX_EXTSTORED.
2214 # mark LFS entries with REVIDX_EXTSTORED.
2017 versions.discard(b'01')
2215 versions.discard(b'01')
2018 versions.discard(b'02')
2216 versions.discard(b'02')
2019
2217
2020 return versions
2218 return versions
2021
2219
2022
2220
2023 def localversion(repo):
2221 def localversion(repo):
2024 # Finds the best version to use for bundles that are meant to be used
2222 # Finds the best version to use for bundles that are meant to be used
2025 # locally, such as those from strip and shelve, and temporary bundles.
2223 # locally, such as those from strip and shelve, and temporary bundles.
2026 return max(supportedoutgoingversions(repo))
2224 return max(supportedoutgoingversions(repo))
2027
2225
2028
2226
2029 def safeversion(repo):
2227 def safeversion(repo):
2030 # Finds the smallest version that it's safe to assume clients of the repo
2228 # Finds the smallest version that it's safe to assume clients of the repo
2031 # will support. For example, all hg versions that support generaldelta also
2229 # will support. For example, all hg versions that support generaldelta also
2032 # support changegroup 02.
2230 # support changegroup 02.
2033 versions = supportedoutgoingversions(repo)
2231 versions = supportedoutgoingversions(repo)
2034 if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
2232 if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
2035 versions.discard(b'01')
2233 versions.discard(b'01')
2036 assert versions
2234 assert versions
2037 return min(versions)
2235 return min(versions)
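
A toy illustration of the max/min choice made by localversion() and
safeversion() above, using a plain set in place of
supportedoutgoingversions(repo):

    versions = {b'01', b'02', b'03'}
    assert max(versions) == b'03'  # localversion: richest format, local use
    versions.discard(b'01')        # generaldelta repos drop version 01
    assert min(versions) == b'02'  # safeversion: lowest version clients grok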
2038
2236
2039
2237
2040 def getbundler(
2238 def getbundler(
2041 version,
2239 version,
2042 repo,
2240 repo,
2043 bundlecaps=None,
2241 bundlecaps=None,
2044 oldmatcher=None,
2242 oldmatcher=None,
2045 matcher=None,
2243 matcher=None,
2046 ellipses=False,
2244 ellipses=False,
2047 shallow=False,
2245 shallow=False,
2048 ellipsisroots=None,
2246 ellipsisroots=None,
2049 fullnodes=None,
2247 fullnodes=None,
2050 remote_sidedata=None,
2248 remote_sidedata=None,
2051 ):
2249 ):
2052 assert version in supportedoutgoingversions(repo)
2250 assert version in supportedoutgoingversions(repo)
2053
2251
2054 if matcher is None:
2252 if matcher is None:
2055 matcher = matchmod.always()
2253 matcher = matchmod.always()
2056 if oldmatcher is None:
2254 if oldmatcher is None:
2057 oldmatcher = matchmod.never()
2255 oldmatcher = matchmod.never()
2058
2256
2059 if version == b'01' and not matcher.always():
2257 if version == b'01' and not matcher.always():
2060 raise error.ProgrammingError(
2258 raise error.ProgrammingError(
2061 b'version 01 changegroups do not support sparse file matchers'
2259 b'version 01 changegroups do not support sparse file matchers'
2062 )
2260 )
2063
2261
2064 if ellipses and version in (b'01', b'02'):
2262 if ellipses and version in (b'01', b'02'):
2065 raise error.Abort(
2263 raise error.Abort(
2066 _(
2264 _(
2067 b'ellipsis nodes require at least cg3 on client and server, '
2265 b'ellipsis nodes require at least cg3 on client and server, '
2068 b'but negotiated version %s'
2266 b'but negotiated version %s'
2069 )
2267 )
2070 % version
2268 % version
2071 )
2269 )
2072
2270
2073 # Requested files could include files not in the local store. So
2271 # Requested files could include files not in the local store. So
2074 # filter those out.
2272 # filter those out.
2075 matcher = repo.narrowmatch(matcher)
2273 matcher = repo.narrowmatch(matcher)
2076
2274
2077 fn = _packermap[version][0]
2275 fn = _packermap[version][0]
2078 return fn(
2276 return fn(
2079 repo,
2277 repo,
2080 oldmatcher,
2278 oldmatcher,
2081 matcher,
2279 matcher,
2082 bundlecaps,
2280 bundlecaps,
2083 ellipses=ellipses,
2281 ellipses=ellipses,
2084 shallow=shallow,
2282 shallow=shallow,
2085 ellipsisroots=ellipsisroots,
2283 ellipsisroots=ellipsisroots,
2086 fullnodes=fullnodes,
2284 fullnodes=fullnodes,
2087 remote_sidedata=remote_sidedata,
2285 remote_sidedata=remote_sidedata,
2088 )
2286 )
2089
2287
2090
2288
2091 def getunbundler(version, fh, alg, extras=None):
2289 def getunbundler(version, fh, alg, extras=None):
2092 return _packermap[version][1](fh, alg, extras=extras)
2290 return _packermap[version][1](fh, alg, extras=extras)
2093
2291
2094
2292
2095 def _changegroupinfo(repo, nodes, source):
2293 def _changegroupinfo(repo, nodes, source):
2096 if repo.ui.verbose or source == b'bundle':
2294 if repo.ui.verbose or source == b'bundle':
2097 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
2295 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
2098 if repo.ui.debugflag:
2296 if repo.ui.debugflag:
2099 repo.ui.debug(b"list of changesets:\n")
2297 repo.ui.debug(b"list of changesets:\n")
2100 for node in nodes:
2298 for node in nodes:
2101 repo.ui.debug(b"%s\n" % hex(node))
2299 repo.ui.debug(b"%s\n" % hex(node))
2102
2300
2103
2301
2104 def makechangegroup(
2302 def makechangegroup(
2105 repo,
2303 repo,
2106 outgoing,
2304 outgoing,
2107 version,
2305 version,
2108 source,
2306 source,
2109 fastpath=False,
2307 fastpath=False,
2110 bundlecaps=None,
2308 bundlecaps=None,
2111 ):
2309 ):
2112 cgstream = makestream(
2310 cgstream = makestream(
2113 repo,
2311 repo,
2114 outgoing,
2312 outgoing,
2115 version,
2313 version,
2116 source,
2314 source,
2117 fastpath=fastpath,
2315 fastpath=fastpath,
2118 bundlecaps=bundlecaps,
2316 bundlecaps=bundlecaps,
2119 )
2317 )
2120 return getunbundler(
2318 return getunbundler(
2121 version,
2319 version,
2122 util.chunkbuffer(cgstream),
2320 util.chunkbuffer(cgstream),
2123 None,
2321 None,
2124 {b'clcount': len(outgoing.missing)},
2322 {b'clcount': len(outgoing.missing)},
2125 )
2323 )
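
A hedged usage sketch: given a repository object and an outgoing set (both
assumed to exist in the caller), this is roughly how strip-like code would
obtain an applicable changegroup via the helpers above; the function name is
invented for illustration:

    def make_local_changegroup_sketch(repo, outgoing):
        # pick the richest version the repo can produce, then build an
        # in-memory changegroup that can be re-read immediately
        version = localversion(repo)
        return makechangegroup(repo, outgoing, version, b'strip')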
2126
2324
2127
2325
2128 def makestream(
2326 def makestream(
2129 repo,
2327 repo,
2130 outgoing,
2328 outgoing,
2131 version,
2329 version,
2132 source,
2330 source,
2133 fastpath=False,
2331 fastpath=False,
2134 bundlecaps=None,
2332 bundlecaps=None,
2135 matcher=None,
2333 matcher=None,
2136 remote_sidedata=None,
2334 remote_sidedata=None,
2137 ):
2335 ):
2138 bundler = getbundler(
2336 bundler = getbundler(
2139 version,
2337 version,
2140 repo,
2338 repo,
2141 bundlecaps=bundlecaps,
2339 bundlecaps=bundlecaps,
2142 matcher=matcher,
2340 matcher=matcher,
2143 remote_sidedata=remote_sidedata,
2341 remote_sidedata=remote_sidedata,
2144 )
2342 )
2145
2343
2146 repo = repo.unfiltered()
2344 repo = repo.unfiltered()
2147 commonrevs = outgoing.common
2345 commonrevs = outgoing.common
2148 csets = outgoing.missing
2346 csets = outgoing.missing
2149 heads = outgoing.ancestorsof
2347 heads = outgoing.ancestorsof
2150 # We go through the fast path if we get told to, or if all (unfiltered)
2348 # We go through the fast path if we get told to, or if all (unfiltered)
2151 # heads have been requested (since we then know that all linkrevs will
2349 # heads have been requested (since we then know that all linkrevs will
2152 # be pulled by the client).
2350 # be pulled by the client).
2153 heads.sort()
2351 heads.sort()
2154 fastpathlinkrev = fastpath or (
2352 fastpathlinkrev = fastpath or (
2155 repo.filtername is None and heads == sorted(repo.heads())
2353 repo.filtername is None and heads == sorted(repo.heads())
2156 )
2354 )
2157
2355
2158 repo.hook(b'preoutgoing', throw=True, source=source)
2356 repo.hook(b'preoutgoing', throw=True, source=source)
2159 _changegroupinfo(repo, csets, source)
2357 _changegroupinfo(repo, csets, source)
2160 return bundler.generate(
2358 return bundler.generate(
2161 commonrevs,
2359 commonrevs,
2162 csets,
2360 csets,
2163 fastpathlinkrev,
2361 fastpathlinkrev,
2164 source,
2362 source,
2165 )
2363 )
2166
2364
2167
2365
2168 def _addchangegroupfiles(
2366 def _addchangegroupfiles(
2169 repo,
2367 repo,
2170 source,
2368 source,
2171 revmap,
2369 revmap,
2172 trp,
2370 trp,
2173 expectedfiles,
2371 expectedfiles,
2174 needfiles,
2372 needfiles,
2175 addrevisioncb=None,
2373 addrevisioncb=None,
2374 debug_info=None,
2176 ):
2375 ):
2177 revisions = 0
2376 revisions = 0
2178 files = 0
2377 files = 0
2179 progress = repo.ui.makeprogress(
2378 progress = repo.ui.makeprogress(
2180 _(b'files'), unit=_(b'files'), total=expectedfiles
2379 _(b'files'), unit=_(b'files'), total=expectedfiles
2181 )
2380 )
2182 for chunkdata in iter(source.filelogheader, {}):
2381 for chunkdata in iter(source.filelogheader, {}):
2183 files += 1
2382 files += 1
2184 f = chunkdata[b"filename"]
2383 f = chunkdata[b"filename"]
2185 repo.ui.debug(b"adding %s revisions\n" % f)
2384 repo.ui.debug(b"adding %s revisions\n" % f)
2186 progress.increment()
2385 progress.increment()
2187 fl = repo.file(f)
2386 fl = repo.file(f)
2188 o = len(fl)
2387 o = len(fl)
2189 try:
2388 try:
2190 deltas = source.deltaiter()
2389 deltas = source.deltaiter()
2191 added = fl.addgroup(
2390 added = fl.addgroup(
2192 deltas,
2391 deltas,
2193 revmap,
2392 revmap,
2194 trp,
2393 trp,
2195 addrevisioncb=addrevisioncb,
2394 addrevisioncb=addrevisioncb,
2395 debug_info=debug_info,
2196 )
2396 )
2197 if not added:
2397 if not added:
2198 raise error.Abort(_(b"received file revlog group is empty"))
2398 raise error.Abort(_(b"received file revlog group is empty"))
2199 except error.CensoredBaseError as e:
2399 except error.CensoredBaseError as e:
2200 raise error.Abort(_(b"received delta base is censored: %s") % e)
2400 raise error.Abort(_(b"received delta base is censored: %s") % e)
2201 revisions += len(fl) - o
2401 revisions += len(fl) - o
2202 if f in needfiles:
2402 if f in needfiles:
2203 needs = needfiles[f]
2403 needs = needfiles[f]
2204 for new in range(o, len(fl)):
2404 for new in range(o, len(fl)):
2205 n = fl.node(new)
2405 n = fl.node(new)
2206 if n in needs:
2406 if n in needs:
2207 needs.remove(n)
2407 needs.remove(n)
2208 else:
2408 else:
2209 raise error.Abort(_(b"received spurious file revlog entry"))
2409 raise error.Abort(_(b"received spurious file revlog entry"))
2210 if not needs:
2410 if not needs:
2211 del needfiles[f]
2411 del needfiles[f]
2212 progress.complete()
2412 progress.complete()
2213
2413
2214 for f, needs in needfiles.items():
2414 for f, needs in needfiles.items():
2215 fl = repo.file(f)
2415 fl = repo.file(f)
2216 for n in needs:
2416 for n in needs:
2217 try:
2417 try:
2218 fl.rev(n)
2418 fl.rev(n)
2219 except error.LookupError:
2419 except error.LookupError:
2220 raise error.Abort(
2420 raise error.Abort(
2221 _(b'missing file data for %s:%s - run hg verify')
2421 _(b'missing file data for %s:%s - run hg verify')
2222 % (f, hex(n))
2422 % (f, hex(n))
2223 )
2423 )
2224
2424
2225 return revisions, files
2425 return revisions, files
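
A self-contained illustration of the needfiles bookkeeping above: every node
we were told to expect must show up while adding file revisions, and anything
left over triggers the "missing file data" abort (plain dicts and sets stand
in for the revlog machinery):

    needfiles = {b'a.txt': {b'node1', b'node2'}}
    received = {b'a.txt': [b'node1', b'node2']}
    for f, nodes in received.items():
        needs = needfiles.get(f, set())
        for n in nodes:
            needs.discard(n)
        if not needs:
            needfiles.pop(f, None)
    assert not needfiles  # leftovers would mean missing file data
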
@@ -1,2883 +1,2889 b''
1 # configitems.py - centralized declaration of configuration options
1 # configitems.py - centralized declaration of configuration options
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10 import re
10 import re
11
11
12 from . import (
12 from . import (
13 encoding,
13 encoding,
14 error,
14 error,
15 )
15 )
16
16
17
17
18 def loadconfigtable(ui, extname, configtable):
18 def loadconfigtable(ui, extname, configtable):
19 """update config item known to the ui with the extension ones"""
19 """update config item known to the ui with the extension ones"""
20 for section, items in sorted(configtable.items()):
20 for section, items in sorted(configtable.items()):
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownkeys = set(knownitems)
22 knownkeys = set(knownitems)
23 newkeys = set(items)
23 newkeys = set(items)
24 for key in sorted(knownkeys & newkeys):
24 for key in sorted(knownkeys & newkeys):
25 msg = b"extension '%s' overwrites config item '%s.%s'"
25 msg = b"extension '%s' overwrites config item '%s.%s'"
26 msg %= (extname, section, key)
26 msg %= (extname, section, key)
27 ui.develwarn(msg, config=b'warn-config')
27 ui.develwarn(msg, config=b'warn-config')
28
28
29 knownitems.update(items)
29 knownitems.update(items)
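
A self-contained sketch of the merge above, with plain dicts standing in for
ui._knownconfig and itemregister (the keys are illustrative):

    known = {b'ui': {b'color': 'core-item'}}
    exttable = {b'ui': {b'color': 'ext-item', b'newknob': 'ext-item'}}
    for section, items in sorted(exttable.items()):
        knownitems = known.setdefault(section, {})
        for key in sorted(set(knownitems) & set(items)):
            pass  # the real code emits a develwarn for each overwrite
        knownitems.update(items)
    assert known[b'ui'][b'color'] == 'ext-item'  # extension wins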
30
30
31
31
32 class configitem:
32 class configitem:
33 """represent a known config item
33 """represent a known config item
34
34
35 :section: the official config section where to find this item,
35 :section: the official config section where to find this item,
36 :name: the official name within the section,
36 :name: the official name within the section,
37 :default: default value for this item,
37 :default: default value for this item,
38 :alias: optional list of tuples as alternatives,
38 :alias: optional list of tuples as alternatives,
39 :generic: this is a generic definition; the name is matched as a regular expression.
39 :generic: this is a generic definition; the name is matched as a regular expression.
40 """
40 """
41
41
42 def __init__(
42 def __init__(
43 self,
43 self,
44 section,
44 section,
45 name,
45 name,
46 default=None,
46 default=None,
47 alias=(),
47 alias=(),
48 generic=False,
48 generic=False,
49 priority=0,
49 priority=0,
50 experimental=False,
50 experimental=False,
51 ):
51 ):
52 self.section = section
52 self.section = section
53 self.name = name
53 self.name = name
54 self.default = default
54 self.default = default
55 self.alias = list(alias)
55 self.alias = list(alias)
56 self.generic = generic
56 self.generic = generic
57 self.priority = priority
57 self.priority = priority
58 self.experimental = experimental
58 self.experimental = experimental
59 self._re = None
59 self._re = None
60 if generic:
60 if generic:
61 self._re = re.compile(self.name)
61 self._re = re.compile(self.name)
62
62
63
63
64 class itemregister(dict):
64 class itemregister(dict):
65 """A specialized dictionary that can handle wild-card selection"""
65 """A specialized dictionary that can handle wild-card selection"""
66
66
67 def __init__(self):
67 def __init__(self):
68 super(itemregister, self).__init__()
68 super(itemregister, self).__init__()
69 self._generics = set()
69 self._generics = set()
70
70
71 def update(self, other):
71 def update(self, other):
72 super(itemregister, self).update(other)
72 super(itemregister, self).update(other)
73 self._generics.update(other._generics)
73 self._generics.update(other._generics)
74
74
75 def __setitem__(self, key, item):
75 def __setitem__(self, key, item):
76 super(itemregister, self).__setitem__(key, item)
76 super(itemregister, self).__setitem__(key, item)
77 if item.generic:
77 if item.generic:
78 self._generics.add(item)
78 self._generics.add(item)
79
79
80 def get(self, key):
80 def get(self, key):
81 baseitem = super(itemregister, self).get(key)
81 baseitem = super(itemregister, self).get(key)
82 if baseitem is not None and not baseitem.generic:
82 if baseitem is not None and not baseitem.generic:
83 return baseitem
83 return baseitem
84
84
85 # search for a matching generic item
85 # search for a matching generic item
86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 for item in generics:
87 for item in generics:
88 # we use 'match' instead of 'search' to make the matching simpler
88 # we use 'match' instead of 'search' to make the matching simpler
89 # for people unfamiliar with regular expressions. Having the match
89 # for people unfamiliar with regular expressions. Having the match
90 # rooted to the start of the string will produce less surprising
90 # rooted to the start of the string will produce less surprising
91 # results for users writing simple regexes for sub-attributes.
91 # results for users writing simple regexes for sub-attributes.
92 #
92 #
93 # For example, using "color\..*" with match produces an unsurprising
93 # For example, using "color\..*" with match produces an unsurprising
94 # result, while using search could suddenly match apparently
94 # result, while using search could suddenly match apparently
95 # unrelated configuration that happens to contain "color."
95 # unrelated configuration that happens to contain "color."
96 # anywhere. This is a tradeoff where we favor requiring ".*" on
96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # some matches to avoid the need to prefix most patterns with "^".
97 # some matches to avoid the need to prefix most patterns with "^".
98 # The "^" seems more error prone.
98 # The "^" seems more error prone.
99 if item._re.match(key):
99 if item._re.match(key):
100 return item
100 return item
101
101
102 return None
102 return None
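
A self-contained illustration of the rooted-match behaviour the comment in
get() describes: re.match anchors the pattern at the start of the key, so a
generic item like "color\..*" catches sub-attributes without accidentally
matching unrelated names:

    import re

    pattern = re.compile(br'color\..*')
    assert pattern.match(b'color.diff.deleted')  # sub-attribute matches
    assert pattern.match(b'ui.color') is None    # unrelated key does not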
103
103
104
104
105 coreitems = {}
105 coreitems = {}
106
106
107
107
108 def _register(configtable, *args, **kwargs):
108 def _register(configtable, *args, **kwargs):
109 item = configitem(*args, **kwargs)
109 item = configitem(*args, **kwargs)
110 section = configtable.setdefault(item.section, itemregister())
110 section = configtable.setdefault(item.section, itemregister())
111 if item.name in section:
111 if item.name in section:
112 msg = b"duplicated config item registration for '%s.%s'"
112 msg = b"duplicated config item registration for '%s.%s'"
113 raise error.ProgrammingError(msg % (item.section, item.name))
113 raise error.ProgrammingError(msg % (item.section, item.name))
114 section[item.name] = item
114 section[item.name] = item
115
115
116
116
117 # special value for the case where the default is derived from other values
117 # special value for the case where the default is derived from other values
118 dynamicdefault = object()
118 dynamicdefault = object()
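
A self-contained note on the sentinel pattern above: a fresh object() is only
ever identical to itself, so callers can reliably test for it (the consumer
below is hypothetical, for illustration only):

    dynamicdefault_sketch = object()

    def resolve_default_sketch(value):
        # hypothetical consumer: compute the real default lazily
        return 'computed' if value is dynamicdefault_sketch else value

    assert resolve_default_sketch(dynamicdefault_sketch) == 'computed'
    assert resolve_default_sketch(42) == 42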
119
119
120 # Registering actual config items
120 # Registering actual config items
121
121
122
122
123 def getitemregister(configtable):
123 def getitemregister(configtable):
124 f = functools.partial(_register, configtable)
124 f = functools.partial(_register, configtable)
125 # export pseudo enum as configitem.*
125 # export pseudo enum as configitem.*
126 f.dynamicdefault = dynamicdefault
126 f.dynamicdefault = dynamicdefault
127 return f
127 return f
128
128
129
129
130 coreconfigitem = getitemregister(coreitems)
130 coreconfigitem = getitemregister(coreitems)
131
131
132
132
133 def _registerdiffopts(section, configprefix=b''):
133 def _registerdiffopts(section, configprefix=b''):
134 coreconfigitem(
134 coreconfigitem(
135 section,
135 section,
136 configprefix + b'nodates',
136 configprefix + b'nodates',
137 default=False,
137 default=False,
138 )
138 )
139 coreconfigitem(
139 coreconfigitem(
140 section,
140 section,
141 configprefix + b'showfunc',
141 configprefix + b'showfunc',
142 default=False,
142 default=False,
143 )
143 )
144 coreconfigitem(
144 coreconfigitem(
145 section,
145 section,
146 configprefix + b'unified',
146 configprefix + b'unified',
147 default=None,
147 default=None,
148 )
148 )
149 coreconfigitem(
149 coreconfigitem(
150 section,
150 section,
151 configprefix + b'git',
151 configprefix + b'git',
152 default=False,
152 default=False,
153 )
153 )
154 coreconfigitem(
154 coreconfigitem(
155 section,
155 section,
156 configprefix + b'ignorews',
156 configprefix + b'ignorews',
157 default=False,
157 default=False,
158 )
158 )
159 coreconfigitem(
159 coreconfigitem(
160 section,
160 section,
161 configprefix + b'ignorewsamount',
161 configprefix + b'ignorewsamount',
162 default=False,
162 default=False,
163 )
163 )
164 coreconfigitem(
164 coreconfigitem(
165 section,
165 section,
166 configprefix + b'ignoreblanklines',
166 configprefix + b'ignoreblanklines',
167 default=False,
167 default=False,
168 )
168 )
169 coreconfigitem(
169 coreconfigitem(
170 section,
170 section,
171 configprefix + b'ignorewseol',
171 configprefix + b'ignorewseol',
172 default=False,
172 default=False,
173 )
173 )
174 coreconfigitem(
174 coreconfigitem(
175 section,
175 section,
176 configprefix + b'nobinary',
176 configprefix + b'nobinary',
177 default=False,
177 default=False,
178 )
178 )
179 coreconfigitem(
179 coreconfigitem(
180 section,
180 section,
181 configprefix + b'noprefix',
181 configprefix + b'noprefix',
182 default=False,
182 default=False,
183 )
183 )
184 coreconfigitem(
184 coreconfigitem(
185 section,
185 section,
186 configprefix + b'word-diff',
186 configprefix + b'word-diff',
187 default=False,
187 default=False,
188 )
188 )
189
189
190
190
191 coreconfigitem(
191 coreconfigitem(
192 b'alias',
192 b'alias',
193 b'.*',
193 b'.*',
194 default=dynamicdefault,
194 default=dynamicdefault,
195 generic=True,
195 generic=True,
196 )
196 )
197 coreconfigitem(
197 coreconfigitem(
198 b'auth',
198 b'auth',
199 b'cookiefile',
199 b'cookiefile',
200 default=None,
200 default=None,
201 )
201 )
202 _registerdiffopts(section=b'annotate')
202 _registerdiffopts(section=b'annotate')
203 # bookmarks.pushing: internal hack for discovery
203 # bookmarks.pushing: internal hack for discovery
204 coreconfigitem(
204 coreconfigitem(
205 b'bookmarks',
205 b'bookmarks',
206 b'pushing',
206 b'pushing',
207 default=list,
207 default=list,
208 )
208 )
209 # bundle.mainreporoot: internal hack for bundlerepo
209 # bundle.mainreporoot: internal hack for bundlerepo
210 coreconfigitem(
210 coreconfigitem(
211 b'bundle',
211 b'bundle',
212 b'mainreporoot',
212 b'mainreporoot',
213 default=b'',
213 default=b'',
214 )
214 )
215 coreconfigitem(
215 coreconfigitem(
216 b'censor',
216 b'censor',
217 b'policy',
217 b'policy',
218 default=b'abort',
218 default=b'abort',
219 experimental=True,
219 experimental=True,
220 )
220 )
221 coreconfigitem(
221 coreconfigitem(
222 b'chgserver',
222 b'chgserver',
223 b'idletimeout',
223 b'idletimeout',
224 default=3600,
224 default=3600,
225 )
225 )
226 coreconfigitem(
226 coreconfigitem(
227 b'chgserver',
227 b'chgserver',
228 b'skiphash',
228 b'skiphash',
229 default=False,
229 default=False,
230 )
230 )
231 coreconfigitem(
231 coreconfigitem(
232 b'cmdserver',
232 b'cmdserver',
233 b'log',
233 b'log',
234 default=None,
234 default=None,
235 )
235 )
236 coreconfigitem(
236 coreconfigitem(
237 b'cmdserver',
237 b'cmdserver',
238 b'max-log-files',
238 b'max-log-files',
239 default=7,
239 default=7,
240 )
240 )
241 coreconfigitem(
241 coreconfigitem(
242 b'cmdserver',
242 b'cmdserver',
243 b'max-log-size',
243 b'max-log-size',
244 default=b'1 MB',
244 default=b'1 MB',
245 )
245 )
246 coreconfigitem(
246 coreconfigitem(
247 b'cmdserver',
247 b'cmdserver',
248 b'max-repo-cache',
248 b'max-repo-cache',
249 default=0,
249 default=0,
250 experimental=True,
250 experimental=True,
251 )
251 )
252 coreconfigitem(
252 coreconfigitem(
253 b'cmdserver',
253 b'cmdserver',
254 b'message-encodings',
254 b'message-encodings',
255 default=list,
255 default=list,
256 )
256 )
257 coreconfigitem(
257 coreconfigitem(
258 b'cmdserver',
258 b'cmdserver',
259 b'track-log',
259 b'track-log',
260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 )
261 )
262 coreconfigitem(
262 coreconfigitem(
263 b'cmdserver',
263 b'cmdserver',
264 b'shutdown-on-interrupt',
264 b'shutdown-on-interrupt',
265 default=True,
265 default=True,
266 )
266 )
267 coreconfigitem(
267 coreconfigitem(
268 b'color',
268 b'color',
269 b'.*',
269 b'.*',
270 default=None,
270 default=None,
271 generic=True,
271 generic=True,
272 )
272 )
273 coreconfigitem(
273 coreconfigitem(
274 b'color',
274 b'color',
275 b'mode',
275 b'mode',
276 default=b'auto',
276 default=b'auto',
277 )
277 )
278 coreconfigitem(
278 coreconfigitem(
279 b'color',
279 b'color',
280 b'pagermode',
280 b'pagermode',
281 default=dynamicdefault,
281 default=dynamicdefault,
282 )
282 )
283 coreconfigitem(
283 coreconfigitem(
284 b'command-templates',
284 b'command-templates',
285 b'graphnode',
285 b'graphnode',
286 default=None,
286 default=None,
287 alias=[(b'ui', b'graphnodetemplate')],
287 alias=[(b'ui', b'graphnodetemplate')],
288 )
288 )
289 coreconfigitem(
289 coreconfigitem(
290 b'command-templates',
290 b'command-templates',
291 b'log',
291 b'log',
292 default=None,
292 default=None,
293 alias=[(b'ui', b'logtemplate')],
293 alias=[(b'ui', b'logtemplate')],
294 )
294 )
295 coreconfigitem(
295 coreconfigitem(
296 b'command-templates',
296 b'command-templates',
297 b'mergemarker',
297 b'mergemarker',
298 default=(
298 default=(
299 b'{node|short} '
299 b'{node|short} '
300 b'{ifeq(tags, "tip", "", '
300 b'{ifeq(tags, "tip", "", '
301 b'ifeq(tags, "", "", "{tags} "))}'
301 b'ifeq(tags, "", "", "{tags} "))}'
302 b'{if(bookmarks, "{bookmarks} ")}'
302 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{ifeq(branch, "default", "", "{branch} ")}'
303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'- {author|user}: {desc|firstline}'
304 b'- {author|user}: {desc|firstline}'
305 ),
305 ),
306 alias=[(b'ui', b'mergemarkertemplate')],
306 alias=[(b'ui', b'mergemarkertemplate')],
307 )
307 )
308 coreconfigitem(
308 coreconfigitem(
309 b'command-templates',
309 b'command-templates',
310 b'pre-merge-tool-output',
310 b'pre-merge-tool-output',
311 default=None,
311 default=None,
312 alias=[(b'ui', b'pre-merge-tool-output-template')],
312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 )
313 )
314 coreconfigitem(
314 coreconfigitem(
315 b'command-templates',
315 b'command-templates',
316 b'oneline-summary',
316 b'oneline-summary',
317 default=None,
317 default=None,
318 )
318 )
319 coreconfigitem(
319 coreconfigitem(
320 b'command-templates',
320 b'command-templates',
321 b'oneline-summary.*',
321 b'oneline-summary.*',
322 default=dynamicdefault,
322 default=dynamicdefault,
323 generic=True,
323 generic=True,
324 )
324 )
325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 coreconfigitem(
326 coreconfigitem(
327 b'commands',
327 b'commands',
328 b'commit.post-status',
328 b'commit.post-status',
329 default=False,
329 default=False,
330 )
330 )
331 coreconfigitem(
331 coreconfigitem(
332 b'commands',
332 b'commands',
333 b'grep.all-files',
333 b'grep.all-files',
334 default=False,
334 default=False,
335 experimental=True,
335 experimental=True,
336 )
336 )
337 coreconfigitem(
337 coreconfigitem(
338 b'commands',
338 b'commands',
339 b'merge.require-rev',
339 b'merge.require-rev',
340 default=False,
340 default=False,
341 )
341 )
342 coreconfigitem(
342 coreconfigitem(
343 b'commands',
343 b'commands',
344 b'push.require-revs',
344 b'push.require-revs',
345 default=False,
345 default=False,
346 )
346 )
347 coreconfigitem(
347 coreconfigitem(
348 b'commands',
348 b'commands',
349 b'resolve.confirm',
349 b'resolve.confirm',
350 default=False,
350 default=False,
351 )
351 )
352 coreconfigitem(
352 coreconfigitem(
353 b'commands',
353 b'commands',
354 b'resolve.explicit-re-merge',
354 b'resolve.explicit-re-merge',
355 default=False,
355 default=False,
356 )
356 )
357 coreconfigitem(
357 coreconfigitem(
358 b'commands',
358 b'commands',
359 b'resolve.mark-check',
359 b'resolve.mark-check',
360 default=b'none',
360 default=b'none',
361 )
361 )
362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 coreconfigitem(
363 coreconfigitem(
364 b'commands',
364 b'commands',
365 b'show.aliasprefix',
365 b'show.aliasprefix',
366 default=list,
366 default=list,
367 )
367 )
368 coreconfigitem(
368 coreconfigitem(
369 b'commands',
369 b'commands',
370 b'status.relative',
370 b'status.relative',
371 default=False,
371 default=False,
372 )
372 )
373 coreconfigitem(
373 coreconfigitem(
374 b'commands',
374 b'commands',
375 b'status.skipstates',
375 b'status.skipstates',
376 default=[],
376 default=[],
377 experimental=True,
377 experimental=True,
378 )
378 )
379 coreconfigitem(
379 coreconfigitem(
380 b'commands',
380 b'commands',
381 b'status.terse',
381 b'status.terse',
382 default=b'',
382 default=b'',
383 )
383 )
384 coreconfigitem(
384 coreconfigitem(
385 b'commands',
385 b'commands',
386 b'status.verbose',
386 b'status.verbose',
387 default=False,
387 default=False,
388 )
388 )
389 coreconfigitem(
389 coreconfigitem(
390 b'commands',
390 b'commands',
391 b'update.check',
391 b'update.check',
392 default=None,
392 default=None,
393 )
393 )
394 coreconfigitem(
394 coreconfigitem(
395 b'commands',
395 b'commands',
396 b'update.requiredest',
396 b'update.requiredest',
397 default=False,
397 default=False,
398 )
398 )
399 coreconfigitem(
399 coreconfigitem(
400 b'committemplate',
400 b'committemplate',
401 b'.*',
401 b'.*',
402 default=None,
402 default=None,
403 generic=True,
403 generic=True,
404 )
404 )
405 coreconfigitem(
405 coreconfigitem(
406 b'convert',
406 b'convert',
407 b'bzr.saverev',
407 b'bzr.saverev',
408 default=True,
408 default=True,
409 )
409 )
410 coreconfigitem(
410 coreconfigitem(
411 b'convert',
411 b'convert',
412 b'cvsps.cache',
412 b'cvsps.cache',
413 default=True,
413 default=True,
414 )
414 )
415 coreconfigitem(
415 coreconfigitem(
416 b'convert',
416 b'convert',
417 b'cvsps.fuzz',
417 b'cvsps.fuzz',
418 default=60,
418 default=60,
419 )
419 )
420 coreconfigitem(
420 coreconfigitem(
421 b'convert',
421 b'convert',
422 b'cvsps.logencoding',
422 b'cvsps.logencoding',
423 default=None,
423 default=None,
424 )
424 )
425 coreconfigitem(
425 coreconfigitem(
426 b'convert',
426 b'convert',
427 b'cvsps.mergefrom',
427 b'cvsps.mergefrom',
428 default=None,
428 default=None,
429 )
429 )
430 coreconfigitem(
430 coreconfigitem(
431 b'convert',
431 b'convert',
432 b'cvsps.mergeto',
432 b'cvsps.mergeto',
433 default=None,
433 default=None,
434 )
434 )
435 coreconfigitem(
435 coreconfigitem(
436 b'convert',
436 b'convert',
437 b'git.committeractions',
437 b'git.committeractions',
438 default=lambda: [b'messagedifferent'],
438 default=lambda: [b'messagedifferent'],
439 )
439 )
440 coreconfigitem(
440 coreconfigitem(
441 b'convert',
441 b'convert',
442 b'git.extrakeys',
442 b'git.extrakeys',
443 default=list,
443 default=list,
444 )
444 )
445 coreconfigitem(
445 coreconfigitem(
446 b'convert',
446 b'convert',
447 b'git.findcopiesharder',
447 b'git.findcopiesharder',
448 default=False,
448 default=False,
449 )
449 )
450 coreconfigitem(
450 coreconfigitem(
451 b'convert',
451 b'convert',
452 b'git.remoteprefix',
452 b'git.remoteprefix',
453 default=b'remote',
453 default=b'remote',
454 )
454 )
455 coreconfigitem(
455 coreconfigitem(
456 b'convert',
456 b'convert',
457 b'git.renamelimit',
457 b'git.renamelimit',
458 default=400,
458 default=400,
459 )
459 )
460 coreconfigitem(
460 coreconfigitem(
461 b'convert',
461 b'convert',
462 b'git.saverev',
462 b'git.saverev',
463 default=True,
463 default=True,
464 )
464 )
465 coreconfigitem(
465 coreconfigitem(
466 b'convert',
466 b'convert',
467 b'git.similarity',
467 b'git.similarity',
468 default=50,
468 default=50,
469 )
469 )
470 coreconfigitem(
470 coreconfigitem(
471 b'convert',
471 b'convert',
472 b'git.skipsubmodules',
472 b'git.skipsubmodules',
473 default=False,
473 default=False,
474 )
474 )
475 coreconfigitem(
475 coreconfigitem(
476 b'convert',
476 b'convert',
477 b'hg.clonebranches',
477 b'hg.clonebranches',
478 default=False,
478 default=False,
479 )
479 )
480 coreconfigitem(
480 coreconfigitem(
481 b'convert',
481 b'convert',
482 b'hg.ignoreerrors',
482 b'hg.ignoreerrors',
483 default=False,
483 default=False,
484 )
484 )
485 coreconfigitem(
485 coreconfigitem(
486 b'convert',
486 b'convert',
487 b'hg.preserve-hash',
487 b'hg.preserve-hash',
488 default=False,
488 default=False,
489 )
489 )
490 coreconfigitem(
490 coreconfigitem(
491 b'convert',
491 b'convert',
492 b'hg.revs',
492 b'hg.revs',
493 default=None,
493 default=None,
494 )
494 )
495 coreconfigitem(
495 coreconfigitem(
496 b'convert',
496 b'convert',
497 b'hg.saverev',
497 b'hg.saverev',
498 default=False,
498 default=False,
499 )
499 )
500 coreconfigitem(
500 coreconfigitem(
501 b'convert',
501 b'convert',
502 b'hg.sourcename',
502 b'hg.sourcename',
503 default=None,
503 default=None,
504 )
504 )
505 coreconfigitem(
505 coreconfigitem(
506 b'convert',
506 b'convert',
507 b'hg.startrev',
507 b'hg.startrev',
508 default=None,
508 default=None,
509 )
509 )
510 coreconfigitem(
510 coreconfigitem(
511 b'convert',
511 b'convert',
512 b'hg.tagsbranch',
512 b'hg.tagsbranch',
513 default=b'default',
513 default=b'default',
514 )
514 )
515 coreconfigitem(
515 coreconfigitem(
516 b'convert',
516 b'convert',
517 b'hg.usebranchnames',
517 b'hg.usebranchnames',
518 default=True,
518 default=True,
519 )
519 )
520 coreconfigitem(
520 coreconfigitem(
521 b'convert',
521 b'convert',
522 b'ignoreancestorcheck',
522 b'ignoreancestorcheck',
523 default=False,
523 default=False,
524 experimental=True,
524 experimental=True,
525 )
525 )
526 coreconfigitem(
526 coreconfigitem(
527 b'convert',
527 b'convert',
528 b'localtimezone',
528 b'localtimezone',
529 default=False,
529 default=False,
530 )
530 )
531 coreconfigitem(
531 coreconfigitem(
532 b'convert',
532 b'convert',
533 b'p4.encoding',
533 b'p4.encoding',
534 default=dynamicdefault,
534 default=dynamicdefault,
535 )
535 )
536 coreconfigitem(
536 coreconfigitem(
537 b'convert',
537 b'convert',
538 b'p4.startrev',
538 b'p4.startrev',
539 default=0,
539 default=0,
540 )
540 )
541 coreconfigitem(
541 coreconfigitem(
542 b'convert',
542 b'convert',
543 b'skiptags',
543 b'skiptags',
544 default=False,
544 default=False,
545 )
545 )
546 coreconfigitem(
546 coreconfigitem(
547 b'convert',
547 b'convert',
548 b'svn.debugsvnlog',
548 b'svn.debugsvnlog',
549 default=True,
549 default=True,
550 )
550 )
551 coreconfigitem(
551 coreconfigitem(
552 b'convert',
552 b'convert',
553 b'svn.trunk',
553 b'svn.trunk',
554 default=None,
554 default=None,
555 )
555 )
556 coreconfigitem(
556 coreconfigitem(
557 b'convert',
557 b'convert',
558 b'svn.tags',
558 b'svn.tags',
559 default=None,
559 default=None,
560 )
560 )
561 coreconfigitem(
561 coreconfigitem(
562 b'convert',
562 b'convert',
563 b'svn.branches',
563 b'svn.branches',
564 default=None,
564 default=None,
565 )
565 )
566 coreconfigitem(
566 coreconfigitem(
567 b'convert',
567 b'convert',
568 b'svn.startrev',
568 b'svn.startrev',
569 default=0,
569 default=0,
570 )
570 )
571 coreconfigitem(
571 coreconfigitem(
572 b'convert',
572 b'convert',
573 b'svn.dangerous-set-commit-dates',
573 b'svn.dangerous-set-commit-dates',
574 default=False,
574 default=False,
575 )
575 )
576 coreconfigitem(
576 coreconfigitem(
577 b'debug',
577 b'debug',
578 b'dirstate.delaywrite',
578 b'dirstate.delaywrite',
579 default=0,
579 default=0,
580 )
580 )
581 coreconfigitem(
581 coreconfigitem(
582 b'debug',
582 b'debug',
583 b'revlog.verifyposition.changelog',
583 b'revlog.verifyposition.changelog',
584 default=b'',
584 default=b'',
585 )
585 )
586 coreconfigitem(
586 coreconfigitem(
587 b'debug',
587 b'debug',
588 b'revlog.debug-delta',
588 b'revlog.debug-delta',
589 default=False,
589 default=False,
590 )
590 )
591 # display extra information about the bundling process
591 # display extra information about the bundling process
592 coreconfigitem(
592 coreconfigitem(
593 b'debug',
593 b'debug',
594 b'bundling-stats',
594 b'bundling-stats',
595 default=False,
595 default=False,
596 )
596 )
597 # display extra information about the unbundling process
598 coreconfigitem(
599 b'debug',
600 b'unbundling-stats',
601 default=False,
602 )
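
A hedged sketch of the read side for the two flags registered above; only the
registrations are part of this change, and the consuming helper below is
invented for illustration (ui is assumed to be a Mercurial ui object):

    def want_bundle_stats_sketch(ui, unbundling=False):
        name = b'unbundling-stats' if unbundling else b'bundling-stats'
        return ui.configbool(b'debug', name)
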
597 coreconfigitem(
603 coreconfigitem(
598 b'defaults',
604 b'defaults',
599 b'.*',
605 b'.*',
600 default=None,
606 default=None,
601 generic=True,
607 generic=True,
602 )
608 )
603 coreconfigitem(
609 coreconfigitem(
604 b'devel',
610 b'devel',
605 b'all-warnings',
611 b'all-warnings',
606 default=False,
612 default=False,
607 )
613 )
608 coreconfigitem(
614 coreconfigitem(
609 b'devel',
615 b'devel',
610 b'bundle2.debug',
616 b'bundle2.debug',
611 default=False,
617 default=False,
612 )
618 )
613 coreconfigitem(
619 coreconfigitem(
614 b'devel',
620 b'devel',
615 b'bundle.delta',
621 b'bundle.delta',
616 default=b'',
622 default=b'',
617 )
623 )
618 coreconfigitem(
624 coreconfigitem(
619 b'devel',
625 b'devel',
620 b'cache-vfs',
626 b'cache-vfs',
621 default=None,
627 default=None,
622 )
628 )
623 coreconfigitem(
629 coreconfigitem(
624 b'devel',
630 b'devel',
625 b'check-locks',
631 b'check-locks',
626 default=False,
632 default=False,
627 )
633 )
628 coreconfigitem(
634 coreconfigitem(
629 b'devel',
635 b'devel',
630 b'check-relroot',
636 b'check-relroot',
631 default=False,
637 default=False,
632 )
638 )
633 # Track copy information for all files, not just "added" ones (very slow)
639 # Track copy information for all files, not just "added" ones (very slow)
634 coreconfigitem(
640 coreconfigitem(
635 b'devel',
641 b'devel',
636 b'copy-tracing.trace-all-files',
642 b'copy-tracing.trace-all-files',
637 default=False,
643 default=False,
638 )
644 )
639 coreconfigitem(
645 coreconfigitem(
640 b'devel',
646 b'devel',
641 b'default-date',
647 b'default-date',
642 default=None,
648 default=None,
643 )
649 )
644 coreconfigitem(
650 coreconfigitem(
645 b'devel',
651 b'devel',
646 b'deprec-warn',
652 b'deprec-warn',
647 default=False,
653 default=False,
648 )
654 )
649 coreconfigitem(
655 coreconfigitem(
650 b'devel',
656 b'devel',
651 b'disableloaddefaultcerts',
657 b'disableloaddefaultcerts',
652 default=False,
658 default=False,
653 )
659 )
654 coreconfigitem(
660 coreconfigitem(
655 b'devel',
661 b'devel',
656 b'warn-empty-changegroup',
662 b'warn-empty-changegroup',
657 default=False,
663 default=False,
658 )
664 )
659 coreconfigitem(
665 coreconfigitem(
660 b'devel',
666 b'devel',
661 b'legacy.exchange',
667 b'legacy.exchange',
662 default=list,
668 default=list,
663 )
669 )
664 # When True, revlogs use a special reference version of the nodemap that is
670 # When True, revlogs use a special reference version of the nodemap that is
665 # not performant but is "known" to behave properly.
671 # not performant but is "known" to behave properly.
666 coreconfigitem(
672 coreconfigitem(
667 b'devel',
673 b'devel',
668 b'persistent-nodemap',
674 b'persistent-nodemap',
669 default=False,
675 default=False,
670 )
676 )
671 coreconfigitem(
677 coreconfigitem(
672 b'devel',
678 b'devel',
673 b'servercafile',
679 b'servercafile',
674 default=b'',
680 default=b'',
675 )
681 )
676 coreconfigitem(
682 coreconfigitem(
677 b'devel',
683 b'devel',
678 b'serverexactprotocol',
684 b'serverexactprotocol',
679 default=b'',
685 default=b'',
680 )
686 )
681 coreconfigitem(
687 coreconfigitem(
682 b'devel',
688 b'devel',
683 b'serverrequirecert',
689 b'serverrequirecert',
684 default=False,
690 default=False,
685 )
691 )
686 coreconfigitem(
692 coreconfigitem(
687 b'devel',
693 b'devel',
688 b'strip-obsmarkers',
694 b'strip-obsmarkers',
689 default=True,
695 default=True,
690 )
696 )
691 coreconfigitem(
697 coreconfigitem(
692 b'devel',
698 b'devel',
693 b'warn-config',
699 b'warn-config',
694 default=None,
700 default=None,
695 )
701 )
696 coreconfigitem(
702 coreconfigitem(
697 b'devel',
703 b'devel',
698 b'warn-config-default',
704 b'warn-config-default',
699 default=None,
705 default=None,
700 )
706 )
701 coreconfigitem(
707 coreconfigitem(
702 b'devel',
708 b'devel',
703 b'user.obsmarker',
709 b'user.obsmarker',
704 default=None,
710 default=None,
705 )
711 )
706 coreconfigitem(
712 coreconfigitem(
707 b'devel',
713 b'devel',
708 b'warn-config-unknown',
714 b'warn-config-unknown',
709 default=None,
715 default=None,
710 )
716 )
711 coreconfigitem(
717 coreconfigitem(
712 b'devel',
718 b'devel',
713 b'debug.copies',
719 b'debug.copies',
714 default=False,
720 default=False,
715 )
721 )
716 coreconfigitem(
722 coreconfigitem(
717 b'devel',
723 b'devel',
718 b'copy-tracing.multi-thread',
724 b'copy-tracing.multi-thread',
719 default=True,
725 default=True,
720 )
726 )
721 coreconfigitem(
727 coreconfigitem(
722 b'devel',
728 b'devel',
723 b'debug.extensions',
729 b'debug.extensions',
724 default=False,
730 default=False,
725 )
731 )
726 coreconfigitem(
732 coreconfigitem(
727 b'devel',
733 b'devel',
728 b'debug.repo-filters',
734 b'debug.repo-filters',
729 default=False,
735 default=False,
730 )
736 )
731 coreconfigitem(
737 coreconfigitem(
732 b'devel',
738 b'devel',
733 b'debug.peer-request',
739 b'debug.peer-request',
734 default=False,
740 default=False,
735 )
741 )
736 # If discovery.exchange-heads is False, the discovery will not start with
742 # If discovery.exchange-heads is False, the discovery will not start with
737 # remote head fetching and local head querying.
743 # remote head fetching and local head querying.
738 coreconfigitem(
744 coreconfigitem(
739 b'devel',
745 b'devel',
740 b'discovery.exchange-heads',
746 b'discovery.exchange-heads',
741 default=True,
747 default=True,
742 )
748 )
743 # If discovery.grow-sample is False, the sample size used in set discovery will
749 # If discovery.grow-sample is False, the sample size used in set discovery will
744 # not be increased through the process
750 # not be increased through the process
745 coreconfigitem(
751 coreconfigitem(
746 b'devel',
752 b'devel',
747 b'discovery.grow-sample',
753 b'discovery.grow-sample',
748 default=True,
754 default=True,
749 )
755 )
750 # When discovery.grow-sample.dynamic is True (the default), the sample size
756 # When discovery.grow-sample.dynamic is True (the default), the sample size
751 # is adapted to the shape of the undecided set (it is set to the max of:
757 # is adapted to the shape of the undecided set (it is set to the max of:
752 # <target-size>, len(roots(undecided)), len(heads(undecided)))
758 # <target-size>, len(roots(undecided)), len(heads(undecided)))
753 coreconfigitem(
759 coreconfigitem(
754 b'devel',
760 b'devel',
755 b'discovery.grow-sample.dynamic',
761 b'discovery.grow-sample.dynamic',
756 default=True,
762 default=True,
757 )
763 )
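
A toy worked example of the max rule above: with <target-size> = 200, 350
roots and 120 heads in the undecided set, the sample size becomes
max(200, 350, 120) = 350.
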
758 # discovery.grow-sample.rate controls the rate at which the sample grows
764 # discovery.grow-sample.rate controls the rate at which the sample grows
759 coreconfigitem(
765 coreconfigitem(
760 b'devel',
766 b'devel',
761 b'discovery.grow-sample.rate',
767 b'discovery.grow-sample.rate',
762 default=1.05,
768 default=1.05,
763 )
769 )
764 # If discovery.randomize is False, random sampling during discovery is
770 # If discovery.randomize is False, random sampling during discovery is
765 # deterministic. It is meant for integration tests.
771 # deterministic. It is meant for integration tests.
766 coreconfigitem(
772 coreconfigitem(
767 b'devel',
773 b'devel',
768 b'discovery.randomize',
774 b'discovery.randomize',
769 default=True,
775 default=True,
770 )
776 )
771 # Control the initial size of the discovery sample
777 # Control the initial size of the discovery sample
772 coreconfigitem(
778 coreconfigitem(
773 b'devel',
779 b'devel',
774 b'discovery.sample-size',
780 b'discovery.sample-size',
775 default=200,
781 default=200,
776 )
782 )
777 # Control the size of the sample used in the first discovery round
783 # Control the size of the sample used in the first discovery round
778 coreconfigitem(
784 coreconfigitem(
779 b'devel',
785 b'devel',
780 b'discovery.sample-size.initial',
786 b'discovery.sample-size.initial',
781 default=100,
787 default=100,
782 )
788 )
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'changegroup4',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
coreconfigitem(
    b'experimental',
    b'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copies.read-from',
    default=b"filelog-only",
)
coreconfigitem(
    b'experimental',
    b'copies.write-to',
    default=b'filelog-only',
)
coreconfigitem(
    b'experimental',
    b'crecordtest',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'directaccess',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'directaccess.revnums',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'editortmpinhg',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
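# Illustration (an assumption drawn from the `alias` keyword above): an
# alias keeps the old (section, name) pair readable, so a legacy hgrc
# entry such as:
#
#     [experimental]
#     allowdivergence = yes
#
# still resolves to `experimental.evolution.allowdivergence`.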
coreconfigitem(
    b'experimental',
    b'evolution.allowunstable',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.createmarkers',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental',
    b'evolution.exchange',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'log.topo',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.report-instabilities',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'evolution.track-operation',
    default=True,
)
# Repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental',
    b'extra-filter-revs',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'maxdeltachainspan',
    default=-1,
)
# tracks files which were undeleted (merge might delete them but we explicitly
# kept/undeleted them) and creates new filenodes for them
coreconfigitem(
    b'experimental',
    b'merge-track-salvaged',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'mmapindexthreshold',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'narrow',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'exportableenviron',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.index',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.similarity',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphshorten',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'hook-track-tags',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppostargs',
    default=False,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental',
    b'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'remotenames',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'removeemptydirs',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revisions.prefixhexnode',
    default=False,
)
1141 # "out of experimental" todo list.
1147 # "out of experimental" todo list.
1142 #
1148 #
1143 # * include management of a persistent nodemap in the main docket
1149 # * include management of a persistent nodemap in the main docket
1144 # * enforce a "no-truncate" policy for mmap safety
1150 # * enforce a "no-truncate" policy for mmap safety
1145 # - for censoring operation
1151 # - for censoring operation
1146 # - for stripping operation
1152 # - for stripping operation
1147 # - for rollback operation
1153 # - for rollback operation
1148 # * proper streaming (race free) of the docket file
1154 # * proper streaming (race free) of the docket file
1149 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1155 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1150 # * Exchange-wise, we will also need to do something more efficient than
1156 # * Exchange-wise, we will also need to do something more efficient than
1151 # keeping references to the affected revlogs, especially memory-wise when
1157 # keeping references to the affected revlogs, especially memory-wise when
1152 # rewriting sidedata.
1158 # rewriting sidedata.
1153 # * introduce a proper solution to reduce the number of filelog related files.
1159 # * introduce a proper solution to reduce the number of filelog related files.
1154 # * use caching for reading sidedata (similar to what we do for data).
1160 # * use caching for reading sidedata (similar to what we do for data).
1155 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1161 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1156 # * Improvement to consider
1162 # * Improvement to consider
1157 # - avoid compression header in chunk using the default compression?
1163 # - avoid compression header in chunk using the default compression?
1158 # - forbid "inline" compression mode entirely?
1164 # - forbid "inline" compression mode entirely?
1159 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1165 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1160 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1166 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1161 # - keep track of chain base or size (probably not that useful anymore)
1167 # - keep track of chain base or size (probably not that useful anymore)
coreconfigitem(
    b'experimental',
    b'revlogv2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'rust.index',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental',
    b'server.stream-narrow-clones',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:public-changes-only',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.min-gap-size',
    default=b'65K',
)
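# Sketch of reading a byte-quantity default like b'65K' above (assuming a
# `repo` in scope); `ui.configbytes` parses 1024-based size suffixes, so
# the default resolves to 65 * 1024 = 66560 bytes:
#
#     gap = repo.ui.configbytes(b'experimental', b'sparse-read.min-gap-size')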
coreconfigitem(
    b'experimental',
    b'treemanifest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'update.atomic-file',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.full-garbage-collection-rate',
    default=1,  # still forcing a full collection on each request
)
coreconfigitem(
    b'experimental',
    b'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.repository-upgrade',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'xdiff',
    default=False,
)
coreconfigitem(
    b'extensions',
    b'[^:]*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'extensions',
    b'[^:]*:required',
    default=False,
    generic=True,
)
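# Note on `generic=True` (explanatory gloss, not upstream prose): the item
# "name" is a regular expression matched against concrete config keys, so
# the `[^:]*` pattern above covers user entries such as:
#
#     [extensions]
#     rebase =
#     myext = /path/to/myext.py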
coreconfigitem(
    b'extdata',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'format',
    b'bookmarks-in-store',
    default=False,
)
coreconfigitem(
    b'format',
    b'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    # Enable this dirstate format *when creating a new repository*.
    # Which format to use for existing repos is controlled by .hg/requires
    b'format',
    b'use-dirstate-v2',
    default=False,
    experimental=True,
    alias=[(b'format', b'exp-rc-dirstate-v2')],
)
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.version',
    default=1,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zstd', b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
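# Mutable defaults such as the list above are supplied as callables (a
# lambda here, the `list` constructor elsewhere) so every lookup gets a
# fresh object rather than one shared mutable value. The equivalent user
# configuration would be:
#
#     [format]
#     revlog-compression = zstd, zlib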
# Experimental TODOs:
#
# * Same as for revlogv2 (but for the reduction of the number of files)
# * Actually computing the rank of changesets
# * Improvement to investigate
#   - storing .hgtags fnode
#   - storing branch related identifier

coreconfigitem(
    b'format',
    b'exp-use-changelog-v2',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'usefncache',
    default=True,
)
coreconfigitem(
    b'format',
    b'usegeneraldelta',
    default=True,
)
coreconfigitem(
    b'format',
    b'usestore',
    default=True,
)


def _persistent_nodemap_default():
    """compute `use-persistent-nodemap` default value

    The feature is disabled unless a fast implementation is available.
    """
    from . import policy

    return policy.importrust('revlog') is not None


coreconfigitem(
    b'format',
    b'use-persistent-nodemap',
    default=_persistent_nodemap_default,
)
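# Because the default above is a function, the value is computed at lookup
# time; a minimal sketch of the effect (assuming a `repo` in scope):
#
#     # True when the Rust revlog module is importable, False otherwise
#     fast = repo.ui.configbool(b'format', b'use-persistent-nodemap')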
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe',
    default=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)

# Turning this on by default means we are confident about the scaling of phases.
# This is not guaranteed to be the case at the time this message is written.
coreconfigitem(
    b'format',
    b'use-internal-phase',
    default=False,
    experimental=True,
)
# The interaction between the archived phase and obsolescence markers needs to
# be sorted out before wider usage of this is to be considered.
#
# At the time this message is written, behavior when archiving obsolete
# changesets differs significantly from stripping. As part of stripping, we
# also remove the obsolescence markers associated with the stripped
# changesets, revealing the predecessor changesets when applicable. When
# archiving, we don't touch the obsolescence markers, keeping everything
# hidden. This can result in a quite confusing situation for people who
# combine exchanging drafts with the archived phase, as some markers needed
# by others may be skipped during exchange.
coreconfigitem(
    b'format',
    b'exp-archived-phase',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'shelve',
    b'store',
    default=b'internal',
    experimental=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_when_unused',
    default=True,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count',
    default=50000,
)
coreconfigitem(
    b'fsmonitor',
    b'warn_update_file_count_rust',
    default=400000,
)
coreconfigitem(
    b'help',
    br'hidden-command\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'help',
    br'hidden-topic\..*',
    default=False,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'[^:]*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hooks',
    b'.*:run-with-plain',
    default=True,
    generic=True,
)
coreconfigitem(
    b'hgweb-paths',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostfingerprints',
    b'.*',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'ciphers',
    default=None,
)
coreconfigitem(
    b'hostsecurity',
    b'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem(
    b'hostsecurity',
    b'.*:verifycertsfile$',
    default=None,
    generic=True,
)

coreconfigitem(
    b'http_proxy',
    b'always',
    default=False,
)
coreconfigitem(
    b'http_proxy',
    b'host',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'no',
    default=list,
)
coreconfigitem(
    b'http_proxy',
    b'passwd',
    default=None,
)
coreconfigitem(
    b'http_proxy',
    b'user',
    default=None,
)

coreconfigitem(
    b'http',
    b'timeout',
    default=None,
)

coreconfigitem(
    b'logtoprocess',
    b'commandexception',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'commandfinish',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'command',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'develwarn',
    default=None,
)
coreconfigitem(
    b'logtoprocess',
    b'uiblocked',
    default=None,
)
coreconfigitem(
    b'merge',
    b'checkunknown',
    default=b'abort',
)
coreconfigitem(
    b'merge',
    b'checkignored',
    default=b'abort',
)
coreconfigitem(
    b'experimental',
    b'merge.checkpathconflicts',
    default=False,
)
coreconfigitem(
    b'merge',
    b'followcopies',
    default=True,
)
coreconfigitem(
    b'merge',
    b'on-failure',
    default=b'continue',
)
coreconfigitem(
    b'merge',
    b'preferancestor',
    default=lambda: [b'*'],
    experimental=True,
)
coreconfigitem(
    b'merge',
    b'strict-capability-check',
    default=False,
)
coreconfigitem(
    b'merge',
    b'disable-partial-tools',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    b'.*',
    default=None,
    generic=True,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.patterns',
    default=dynamicdefault,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.order',
    default=0,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.args',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'partial-merge-tools',
    br'.*\.disable',
    default=False,
    generic=True,
    priority=-1,
    experimental=True,
)
coreconfigitem(
    b'merge-tools',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.args$',
    default=b"$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkers$',
    default=b'basic',
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from command-templates.mergemarker
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'merge-tools',
    br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem(
    b'pager',
    b'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'pager',
    b'ignore',
    default=list,
)
coreconfigitem(
    b'pager',
    b'pager',
    default=dynamicdefault,
)
coreconfigitem(
    b'patch',
    b'eol',
    default=b'strict',
)
coreconfigitem(
    b'patch',
    b'fuzz',
    default=2,
)
coreconfigitem(
    b'paths',
    b'default',
    default=None,
)
coreconfigitem(
    b'paths',
    b'default-push',
    default=None,
)
coreconfigitem(
    b'paths',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:bookmarks.mode',
    default='default',
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:multi-urls',
    default=False,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:pushrev',
    default=None,
    generic=True,
)
coreconfigitem(
    b'paths',
    b'.*:pushurl',
    default=None,
    generic=True,
)
coreconfigitem(
    b'phases',
    b'checksubrepos',
    default=b'follow',
)
coreconfigitem(
    b'phases',
    b'new-commit',
    default=b'draft',
)
coreconfigitem(
    b'phases',
    b'publish',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'enabled',
    default=False,
)
coreconfigitem(
    b'profiling',
    b'format',
    default=b'text',
)
coreconfigitem(
    b'profiling',
    b'freq',
    default=1000,
)
coreconfigitem(
    b'profiling',
    b'limit',
    default=30,
)
coreconfigitem(
    b'profiling',
    b'nested',
    default=0,
)
coreconfigitem(
    b'profiling',
    b'output',
    default=None,
)
coreconfigitem(
    b'profiling',
    b'showmax',
    default=0.999,
)
coreconfigitem(
    b'profiling',
    b'showmin',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'showtime',
    default=True,
)
coreconfigitem(
    b'profiling',
    b'sort',
    default=b'inlinetime',
)
coreconfigitem(
    b'profiling',
    b'statformat',
    default=b'hotpath',
)
coreconfigitem(
    b'profiling',
    b'time-track',
    default=dynamicdefault,
)
coreconfigitem(
    b'profiling',
    b'type',
    default=b'stat',
)
coreconfigitem(
    b'progress',
    b'assume-tty',
    default=False,
)
coreconfigitem(
    b'progress',
    b'changedelay',
    default=1,
)
coreconfigitem(
    b'progress',
    b'clear-complete',
    default=True,
)
coreconfigitem(
    b'progress',
    b'debug',
    default=False,
)
coreconfigitem(
    b'progress',
    b'delay',
    default=3,
)
coreconfigitem(
    b'progress',
    b'disable',
    default=False,
)
coreconfigitem(
    b'progress',
    b'estimateinterval',
    default=60.0,
)
coreconfigitem(
    b'progress',
    b'format',
    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
)
coreconfigitem(
    b'progress',
    b'refresh',
    default=0.1,
)
coreconfigitem(
    b'progress',
    b'width',
    default=dynamicdefault,
)
coreconfigitem(
    b'pull',
    b'confirm',
    default=False,
)
coreconfigitem(
    b'push',
    b'pushvars.server',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'backup-bundle',
    default=True,
    alias=[(b'ui', b'history-editing-backup')],
)
coreconfigitem(
    b'rewrite',
    b'update-timestamp',
    default=False,
)
coreconfigitem(
    b'rewrite',
    b'empty-successor',
    default=b'skip',
    experimental=True,
)
# experimental as long as format.use-dirstate-v2 is.
coreconfigitem(
    b'storage',
    b'dirstate-v2.slow-path',
    default=b"abort",
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'new-repo-backend',
    default=b'revlogv1',
    experimental=True,
)
coreconfigitem(
    b'storage',
    b'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[(b'format', b'aggressivemergedeltas')],
)
coreconfigitem(
    b'storage',
    b'revlog.issue6528.fix-incoming',
    default=True,
)
# experimental as long as rust is experimental (or a C version is implemented)
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.mmap',
    default=True,
)
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    default=b"abort",
)

coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem(
    b'storage',
    b'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zlib.level',
    default=None,
)
coreconfigitem(
    b'storage',
    b'revlog.zstd.level',
    default=None,
)
coreconfigitem(
    b'server',
    b'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1',
    default=True,
)
coreconfigitem(
    b'server',
    b'bundle1gd',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.pull',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle1gd.push',
    default=None,
)
coreconfigitem(
    b'server',
    b'bundle2.stream',
    default=True,
    alias=[(b'experimental', b'bundle2.stream')],
)
coreconfigitem(
    b'server',
    b'compressionengines',
    default=list,
)
coreconfigitem(
    b'server',
    b'concurrent-push-mode',
    default=b'check-related',
)
coreconfigitem(
    b'server',
    b'disablefullbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'maxhttpheaderlen',
    default=1024,
)
coreconfigitem(
    b'server',
    b'pullbundle',
    default=True,
)
coreconfigitem(
    b'server',
    b'preferuncompressed',
    default=False,
)
coreconfigitem(
    b'server',
    b'streamunbundle',
    default=False,
)
coreconfigitem(
    b'server',
    b'uncompressed',
    default=True,
)
coreconfigitem(
    b'server',
    b'uncompressedallowsecret',
    default=False,
)
coreconfigitem(
    b'server',
    b'view',
    default=b'served',
)
coreconfigitem(
    b'server',
    b'validate',
    default=False,
)
coreconfigitem(
    b'server',
    b'zliblevel',
    default=-1,
)
coreconfigitem(
    b'server',
    b'zstdlevel',
    default=3,
)
coreconfigitem(
    b'share',
    b'pool',
    default=None,
)
coreconfigitem(
    b'share',
    b'poolnaming',
    default=b'identity',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe',
    default=b'abort',
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe.warn',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-not-safe:verbose-upgrade',
    default=True,
)
coreconfigitem(
    b'share',
    b'safe-mismatch.source-safe:verbose-upgrade',
    default=True,
)
coreconfigitem(
    b'shelve',
    b'maxbackups',
    default=10,
)
coreconfigitem(
    b'smtp',
    b'host',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'local_hostname',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'password',
    default=None,
)
coreconfigitem(
    b'smtp',
    b'port',
    default=dynamicdefault,
)
coreconfigitem(
    b'smtp',
    b'tls',
    default=b'none',
)
coreconfigitem(
    b'smtp',
    b'username',
    default=None,
)
coreconfigitem(
    b'sparse',
    b'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem(
    b'subrepos',
    b'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem(
    b'subrepos',
    b'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'git:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'subrepos',
    b'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem(
    b'templates',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'templateconfig',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'trusted',
    b'groups',
    default=list,
)
coreconfigitem(
    b'trusted',
    b'users',
    default=list,
)
coreconfigitem(
    b'ui',
    b'_usedassubrepo',
    default=False,
)
coreconfigitem(
    b'ui',
    b'allowemptycommit',
    default=False,
)
coreconfigitem(
    b'ui',
    b'archivemeta',
    default=True,
)
coreconfigitem(
    b'ui',
    b'askusername',
    default=False,
)
coreconfigitem(
2318 b'ui',
2324 b'ui',
2319 b'available-memory',
2325 b'available-memory',
2320 default=None,
2326 default=None,
2321 )
2327 )
2322
2328
2323 coreconfigitem(
2329 coreconfigitem(
2324 b'ui',
2330 b'ui',
2325 b'clonebundlefallback',
2331 b'clonebundlefallback',
2326 default=False,
2332 default=False,
2327 )
2333 )
2328 coreconfigitem(
2334 coreconfigitem(
2329 b'ui',
2335 b'ui',
2330 b'clonebundleprefers',
2336 b'clonebundleprefers',
2331 default=list,
2337 default=list,
2332 )
2338 )
2333 coreconfigitem(
2339 coreconfigitem(
2334 b'ui',
2340 b'ui',
2335 b'clonebundles',
2341 b'clonebundles',
2336 default=True,
2342 default=True,
2337 )
2343 )
2338 coreconfigitem(
2344 coreconfigitem(
2339 b'ui',
2345 b'ui',
2340 b'color',
2346 b'color',
2341 default=b'auto',
2347 default=b'auto',
2342 )
2348 )
2343 coreconfigitem(
2349 coreconfigitem(
2344 b'ui',
2350 b'ui',
2345 b'commitsubrepos',
2351 b'commitsubrepos',
2346 default=False,
2352 default=False,
2347 )
2353 )
2348 coreconfigitem(
2354 coreconfigitem(
2349 b'ui',
2355 b'ui',
2350 b'debug',
2356 b'debug',
2351 default=False,
2357 default=False,
2352 )
2358 )
2353 coreconfigitem(
2359 coreconfigitem(
2354 b'ui',
2360 b'ui',
2355 b'debugger',
2361 b'debugger',
2356 default=None,
2362 default=None,
2357 )
2363 )
2358 coreconfigitem(
2364 coreconfigitem(
2359 b'ui',
2365 b'ui',
2360 b'editor',
2366 b'editor',
2361 default=dynamicdefault,
2367 default=dynamicdefault,
2362 )
2368 )
2363 coreconfigitem(
2369 coreconfigitem(
2364 b'ui',
2370 b'ui',
2365 b'detailed-exit-code',
2371 b'detailed-exit-code',
2366 default=False,
2372 default=False,
2367 experimental=True,
2373 experimental=True,
2368 )
2374 )
2369 coreconfigitem(
2375 coreconfigitem(
2370 b'ui',
2376 b'ui',
2371 b'fallbackencoding',
2377 b'fallbackencoding',
2372 default=None,
2378 default=None,
2373 )
2379 )
2374 coreconfigitem(
2380 coreconfigitem(
2375 b'ui',
2381 b'ui',
2376 b'forcecwd',
2382 b'forcecwd',
2377 default=None,
2383 default=None,
2378 )
2384 )
2379 coreconfigitem(
2385 coreconfigitem(
2380 b'ui',
2386 b'ui',
2381 b'forcemerge',
2387 b'forcemerge',
2382 default=None,
2388 default=None,
2383 )
2389 )
2384 coreconfigitem(
2390 coreconfigitem(
2385 b'ui',
2391 b'ui',
2386 b'formatdebug',
2392 b'formatdebug',
2387 default=False,
2393 default=False,
2388 )
2394 )
2389 coreconfigitem(
2395 coreconfigitem(
2390 b'ui',
2396 b'ui',
2391 b'formatjson',
2397 b'formatjson',
2392 default=False,
2398 default=False,
2393 )
2399 )
2394 coreconfigitem(
2400 coreconfigitem(
2395 b'ui',
2401 b'ui',
2396 b'formatted',
2402 b'formatted',
2397 default=None,
2403 default=None,
2398 )
2404 )
2399 coreconfigitem(
2405 coreconfigitem(
2400 b'ui',
2406 b'ui',
2401 b'interactive',
2407 b'interactive',
2402 default=None,
2408 default=None,
2403 )
2409 )
2404 coreconfigitem(
2410 coreconfigitem(
2405 b'ui',
2411 b'ui',
2406 b'interface',
2412 b'interface',
2407 default=None,
2413 default=None,
2408 )
2414 )
2409 coreconfigitem(
2415 coreconfigitem(
2410 b'ui',
2416 b'ui',
2411 b'interface.chunkselector',
2417 b'interface.chunkselector',
2412 default=None,
2418 default=None,
2413 )
2419 )
2414 coreconfigitem(
2420 coreconfigitem(
2415 b'ui',
2421 b'ui',
2416 b'large-file-limit',
2422 b'large-file-limit',
2417 default=10 * (2 ** 20),
2423 default=10 * (2 ** 20),
2418 )
2424 )
2419 coreconfigitem(
2425 coreconfigitem(
2420 b'ui',
2426 b'ui',
2421 b'logblockedtimes',
2427 b'logblockedtimes',
2422 default=False,
2428 default=False,
2423 )
2429 )
2424 coreconfigitem(
2430 coreconfigitem(
2425 b'ui',
2431 b'ui',
2426 b'merge',
2432 b'merge',
2427 default=None,
2433 default=None,
2428 )
2434 )
2429 coreconfigitem(
2435 coreconfigitem(
2430 b'ui',
2436 b'ui',
2431 b'mergemarkers',
2437 b'mergemarkers',
2432 default=b'basic',
2438 default=b'basic',
2433 )
2439 )
2434 coreconfigitem(
2440 coreconfigitem(
2435 b'ui',
2441 b'ui',
2436 b'message-output',
2442 b'message-output',
2437 default=b'stdio',
2443 default=b'stdio',
2438 )
2444 )
2439 coreconfigitem(
2445 coreconfigitem(
2440 b'ui',
2446 b'ui',
2441 b'nontty',
2447 b'nontty',
2442 default=False,
2448 default=False,
2443 )
2449 )
2444 coreconfigitem(
2450 coreconfigitem(
2445 b'ui',
2451 b'ui',
2446 b'origbackuppath',
2452 b'origbackuppath',
2447 default=None,
2453 default=None,
2448 )
2454 )
2449 coreconfigitem(
2455 coreconfigitem(
2450 b'ui',
2456 b'ui',
2451 b'paginate',
2457 b'paginate',
2452 default=True,
2458 default=True,
2453 )
2459 )
2454 coreconfigitem(
2460 coreconfigitem(
2455 b'ui',
2461 b'ui',
2456 b'patch',
2462 b'patch',
2457 default=None,
2463 default=None,
2458 )
2464 )
2459 coreconfigitem(
2465 coreconfigitem(
2460 b'ui',
2466 b'ui',
2461 b'portablefilenames',
2467 b'portablefilenames',
2462 default=b'warn',
2468 default=b'warn',
2463 )
2469 )
2464 coreconfigitem(
2470 coreconfigitem(
2465 b'ui',
2471 b'ui',
2466 b'promptecho',
2472 b'promptecho',
2467 default=False,
2473 default=False,
2468 )
2474 )
2469 coreconfigitem(
2475 coreconfigitem(
2470 b'ui',
2476 b'ui',
2471 b'quiet',
2477 b'quiet',
2472 default=False,
2478 default=False,
2473 )
2479 )
2474 coreconfigitem(
2480 coreconfigitem(
2475 b'ui',
2481 b'ui',
2476 b'quietbookmarkmove',
2482 b'quietbookmarkmove',
2477 default=False,
2483 default=False,
2478 )
2484 )
2479 coreconfigitem(
2485 coreconfigitem(
2480 b'ui',
2486 b'ui',
2481 b'relative-paths',
2487 b'relative-paths',
2482 default=b'legacy',
2488 default=b'legacy',
2483 )
2489 )
2484 coreconfigitem(
2490 coreconfigitem(
2485 b'ui',
2491 b'ui',
2486 b'remotecmd',
2492 b'remotecmd',
2487 default=b'hg',
2493 default=b'hg',
2488 )
2494 )
2489 coreconfigitem(
2495 coreconfigitem(
2490 b'ui',
2496 b'ui',
2491 b'report_untrusted',
2497 b'report_untrusted',
2492 default=True,
2498 default=True,
2493 )
2499 )
2494 coreconfigitem(
2500 coreconfigitem(
2495 b'ui',
2501 b'ui',
2496 b'rollback',
2502 b'rollback',
2497 default=True,
2503 default=True,
2498 )
2504 )
2499 coreconfigitem(
2505 coreconfigitem(
2500 b'ui',
2506 b'ui',
2501 b'signal-safe-lock',
2507 b'signal-safe-lock',
2502 default=True,
2508 default=True,
2503 )
2509 )
2504 coreconfigitem(
2510 coreconfigitem(
2505 b'ui',
2511 b'ui',
2506 b'slash',
2512 b'slash',
2507 default=False,
2513 default=False,
2508 )
2514 )
2509 coreconfigitem(
2515 coreconfigitem(
2510 b'ui',
2516 b'ui',
2511 b'ssh',
2517 b'ssh',
2512 default=b'ssh',
2518 default=b'ssh',
2513 )
2519 )
2514 coreconfigitem(
2520 coreconfigitem(
2515 b'ui',
2521 b'ui',
2516 b'ssherrorhint',
2522 b'ssherrorhint',
2517 default=None,
2523 default=None,
2518 )
2524 )
2519 coreconfigitem(
2525 coreconfigitem(
2520 b'ui',
2526 b'ui',
2521 b'statuscopies',
2527 b'statuscopies',
2522 default=False,
2528 default=False,
2523 )
2529 )
2524 coreconfigitem(
2530 coreconfigitem(
2525 b'ui',
2531 b'ui',
2526 b'strict',
2532 b'strict',
2527 default=False,
2533 default=False,
2528 )
2534 )
2529 coreconfigitem(
2535 coreconfigitem(
2530 b'ui',
2536 b'ui',
2531 b'style',
2537 b'style',
2532 default=b'',
2538 default=b'',
2533 )
2539 )
2534 coreconfigitem(
2540 coreconfigitem(
2535 b'ui',
2541 b'ui',
2536 b'supportcontact',
2542 b'supportcontact',
2537 default=None,
2543 default=None,
2538 )
2544 )
2539 coreconfigitem(
2545 coreconfigitem(
2540 b'ui',
2546 b'ui',
2541 b'textwidth',
2547 b'textwidth',
2542 default=78,
2548 default=78,
2543 )
2549 )
2544 coreconfigitem(
2550 coreconfigitem(
2545 b'ui',
2551 b'ui',
2546 b'timeout',
2552 b'timeout',
2547 default=b'600',
2553 default=b'600',
2548 )
2554 )
2549 coreconfigitem(
2555 coreconfigitem(
2550 b'ui',
2556 b'ui',
2551 b'timeout.warn',
2557 b'timeout.warn',
2552 default=0,
2558 default=0,
2553 )
2559 )
2554 coreconfigitem(
2560 coreconfigitem(
2555 b'ui',
2561 b'ui',
2556 b'timestamp-output',
2562 b'timestamp-output',
2557 default=False,
2563 default=False,
2558 )
2564 )
2559 coreconfigitem(
2565 coreconfigitem(
2560 b'ui',
2566 b'ui',
2561 b'traceback',
2567 b'traceback',
2562 default=False,
2568 default=False,
2563 )
2569 )
2564 coreconfigitem(
2570 coreconfigitem(
2565 b'ui',
2571 b'ui',
2566 b'tweakdefaults',
2572 b'tweakdefaults',
2567 default=False,
2573 default=False,
2568 )
2574 )
2569 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2575 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2570 coreconfigitem(
2576 coreconfigitem(
2571 b'ui',
2577 b'ui',
2572 b'verbose',
2578 b'verbose',
2573 default=False,
2579 default=False,
2574 )
2580 )
2575 coreconfigitem(
2581 coreconfigitem(
2576 b'verify',
2582 b'verify',
2577 b'skipflags',
2583 b'skipflags',
2578 default=0,
2584 default=0,
2579 )
2585 )
2580 coreconfigitem(
2586 coreconfigitem(
2581 b'web',
2587 b'web',
2582 b'allowbz2',
2588 b'allowbz2',
2583 default=False,
2589 default=False,
2584 )
2590 )
2585 coreconfigitem(
2591 coreconfigitem(
2586 b'web',
2592 b'web',
2587 b'allowgz',
2593 b'allowgz',
2588 default=False,
2594 default=False,
2589 )
2595 )
2590 coreconfigitem(
2596 coreconfigitem(
2591 b'web',
2597 b'web',
2592 b'allow-pull',
2598 b'allow-pull',
2593 alias=[(b'web', b'allowpull')],
2599 alias=[(b'web', b'allowpull')],
2594 default=True,
2600 default=True,
2595 )
2601 )
2596 coreconfigitem(
2602 coreconfigitem(
2597 b'web',
2603 b'web',
2598 b'allow-push',
2604 b'allow-push',
2599 alias=[(b'web', b'allow_push')],
2605 alias=[(b'web', b'allow_push')],
2600 default=list,
2606 default=list,
2601 )
2607 )
2602 coreconfigitem(
2608 coreconfigitem(
2603 b'web',
2609 b'web',
2604 b'allowzip',
2610 b'allowzip',
2605 default=False,
2611 default=False,
2606 )
2612 )
2607 coreconfigitem(
2613 coreconfigitem(
2608 b'web',
2614 b'web',
2609 b'archivesubrepos',
2615 b'archivesubrepos',
2610 default=False,
2616 default=False,
2611 )
2617 )
2612 coreconfigitem(
2618 coreconfigitem(
2613 b'web',
2619 b'web',
2614 b'cache',
2620 b'cache',
2615 default=True,
2621 default=True,
2616 )
2622 )
2617 coreconfigitem(
2623 coreconfigitem(
2618 b'web',
2624 b'web',
2619 b'comparisoncontext',
2625 b'comparisoncontext',
2620 default=5,
2626 default=5,
2621 )
2627 )
2622 coreconfigitem(
2628 coreconfigitem(
2623 b'web',
2629 b'web',
2624 b'contact',
2630 b'contact',
2625 default=None,
2631 default=None,
2626 )
2632 )
2627 coreconfigitem(
2633 coreconfigitem(
2628 b'web',
2634 b'web',
2629 b'deny_push',
2635 b'deny_push',
2630 default=list,
2636 default=list,
2631 )
2637 )
2632 coreconfigitem(
2638 coreconfigitem(
2633 b'web',
2639 b'web',
2634 b'guessmime',
2640 b'guessmime',
2635 default=False,
2641 default=False,
2636 )
2642 )
2637 coreconfigitem(
2643 coreconfigitem(
2638 b'web',
2644 b'web',
2639 b'hidden',
2645 b'hidden',
2640 default=False,
2646 default=False,
2641 )
2647 )
2642 coreconfigitem(
2648 coreconfigitem(
2643 b'web',
2649 b'web',
2644 b'labels',
2650 b'labels',
2645 default=list,
2651 default=list,
2646 )
2652 )
2647 coreconfigitem(
2653 coreconfigitem(
2648 b'web',
2654 b'web',
2649 b'logoimg',
2655 b'logoimg',
2650 default=b'hglogo.png',
2656 default=b'hglogo.png',
2651 )
2657 )
2652 coreconfigitem(
2658 coreconfigitem(
2653 b'web',
2659 b'web',
2654 b'logourl',
2660 b'logourl',
2655 default=b'https://mercurial-scm.org/',
2661 default=b'https://mercurial-scm.org/',
2656 )
2662 )
2657 coreconfigitem(
2663 coreconfigitem(
2658 b'web',
2664 b'web',
2659 b'accesslog',
2665 b'accesslog',
2660 default=b'-',
2666 default=b'-',
2661 )
2667 )
2662 coreconfigitem(
2668 coreconfigitem(
2663 b'web',
2669 b'web',
2664 b'address',
2670 b'address',
2665 default=b'',
2671 default=b'',
2666 )
2672 )
2667 coreconfigitem(
2673 coreconfigitem(
2668 b'web',
2674 b'web',
2669 b'allow-archive',
2675 b'allow-archive',
2670 alias=[(b'web', b'allow_archive')],
2676 alias=[(b'web', b'allow_archive')],
2671 default=list,
2677 default=list,
2672 )
2678 )
2673 coreconfigitem(
2679 coreconfigitem(
2674 b'web',
2680 b'web',
2675 b'allow_read',
2681 b'allow_read',
2676 default=list,
2682 default=list,
2677 )
2683 )
2678 coreconfigitem(
2684 coreconfigitem(
2679 b'web',
2685 b'web',
2680 b'baseurl',
2686 b'baseurl',
2681 default=None,
2687 default=None,
2682 )
2688 )
2683 coreconfigitem(
2689 coreconfigitem(
2684 b'web',
2690 b'web',
2685 b'cacerts',
2691 b'cacerts',
2686 default=None,
2692 default=None,
2687 )
2693 )
2688 coreconfigitem(
2694 coreconfigitem(
2689 b'web',
2695 b'web',
2690 b'certificate',
2696 b'certificate',
2691 default=None,
2697 default=None,
2692 )
2698 )
2693 coreconfigitem(
2699 coreconfigitem(
2694 b'web',
2700 b'web',
2695 b'collapse',
2701 b'collapse',
2696 default=False,
2702 default=False,
2697 )
2703 )
2698 coreconfigitem(
2704 coreconfigitem(
2699 b'web',
2705 b'web',
2700 b'csp',
2706 b'csp',
2701 default=None,
2707 default=None,
2702 )
2708 )
2703 coreconfigitem(
2709 coreconfigitem(
2704 b'web',
2710 b'web',
2705 b'deny_read',
2711 b'deny_read',
2706 default=list,
2712 default=list,
2707 )
2713 )
2708 coreconfigitem(
2714 coreconfigitem(
2709 b'web',
2715 b'web',
2710 b'descend',
2716 b'descend',
2711 default=True,
2717 default=True,
2712 )
2718 )
2713 coreconfigitem(
2719 coreconfigitem(
2714 b'web',
2720 b'web',
2715 b'description',
2721 b'description',
2716 default=b"",
2722 default=b"",
2717 )
2723 )
2718 coreconfigitem(
2724 coreconfigitem(
2719 b'web',
2725 b'web',
2720 b'encoding',
2726 b'encoding',
2721 default=lambda: encoding.encoding,
2727 default=lambda: encoding.encoding,
2722 )
2728 )
2723 coreconfigitem(
2729 coreconfigitem(
2724 b'web',
2730 b'web',
2725 b'errorlog',
2731 b'errorlog',
2726 default=b'-',
2732 default=b'-',
2727 )
2733 )
2728 coreconfigitem(
2734 coreconfigitem(
2729 b'web',
2735 b'web',
2730 b'ipv6',
2736 b'ipv6',
2731 default=False,
2737 default=False,
2732 )
2738 )
2733 coreconfigitem(
2739 coreconfigitem(
2734 b'web',
2740 b'web',
2735 b'maxchanges',
2741 b'maxchanges',
2736 default=10,
2742 default=10,
2737 )
2743 )
2738 coreconfigitem(
2744 coreconfigitem(
2739 b'web',
2745 b'web',
2740 b'maxfiles',
2746 b'maxfiles',
2741 default=10,
2747 default=10,
2742 )
2748 )
2743 coreconfigitem(
2749 coreconfigitem(
2744 b'web',
2750 b'web',
2745 b'maxshortchanges',
2751 b'maxshortchanges',
2746 default=60,
2752 default=60,
2747 )
2753 )
2748 coreconfigitem(
2754 coreconfigitem(
2749 b'web',
2755 b'web',
2750 b'motd',
2756 b'motd',
2751 default=b'',
2757 default=b'',
2752 )
2758 )
2753 coreconfigitem(
2759 coreconfigitem(
2754 b'web',
2760 b'web',
2755 b'name',
2761 b'name',
2756 default=dynamicdefault,
2762 default=dynamicdefault,
2757 )
2763 )
2758 coreconfigitem(
2764 coreconfigitem(
2759 b'web',
2765 b'web',
2760 b'port',
2766 b'port',
2761 default=8000,
2767 default=8000,
2762 )
2768 )
2763 coreconfigitem(
2769 coreconfigitem(
2764 b'web',
2770 b'web',
2765 b'prefix',
2771 b'prefix',
2766 default=b'',
2772 default=b'',
2767 )
2773 )
2768 coreconfigitem(
2774 coreconfigitem(
2769 b'web',
2775 b'web',
2770 b'push_ssl',
2776 b'push_ssl',
2771 default=True,
2777 default=True,
2772 )
2778 )
2773 coreconfigitem(
2779 coreconfigitem(
2774 b'web',
2780 b'web',
2775 b'refreshinterval',
2781 b'refreshinterval',
2776 default=20,
2782 default=20,
2777 )
2783 )
2778 coreconfigitem(
2784 coreconfigitem(
2779 b'web',
2785 b'web',
2780 b'server-header',
2786 b'server-header',
2781 default=None,
2787 default=None,
2782 )
2788 )
2783 coreconfigitem(
2789 coreconfigitem(
2784 b'web',
2790 b'web',
2785 b'static',
2791 b'static',
2786 default=None,
2792 default=None,
2787 )
2793 )
2788 coreconfigitem(
2794 coreconfigitem(
2789 b'web',
2795 b'web',
2790 b'staticurl',
2796 b'staticurl',
2791 default=None,
2797 default=None,
2792 )
2798 )
2793 coreconfigitem(
2799 coreconfigitem(
2794 b'web',
2800 b'web',
2795 b'stripes',
2801 b'stripes',
2796 default=1,
2802 default=1,
2797 )
2803 )
2798 coreconfigitem(
2804 coreconfigitem(
2799 b'web',
2805 b'web',
2800 b'style',
2806 b'style',
2801 default=b'paper',
2807 default=b'paper',
2802 )
2808 )
2803 coreconfigitem(
2809 coreconfigitem(
2804 b'web',
2810 b'web',
2805 b'templates',
2811 b'templates',
2806 default=None,
2812 default=None,
2807 )
2813 )
2808 coreconfigitem(
2814 coreconfigitem(
2809 b'web',
2815 b'web',
2810 b'view',
2816 b'view',
2811 default=b'served',
2817 default=b'served',
2812 experimental=True,
2818 experimental=True,
2813 )
2819 )
2814 coreconfigitem(
2820 coreconfigitem(
2815 b'worker',
2821 b'worker',
2816 b'backgroundclose',
2822 b'backgroundclose',
2817 default=dynamicdefault,
2823 default=dynamicdefault,
2818 )
2824 )
2819 # Windows defaults to a limit of 512 open files. A buffer of 128
2825 # Windows defaults to a limit of 512 open files. A buffer of 128
2820 # should give us enough headway.
2826 # should give us enough headway.
2821 coreconfigitem(
2827 coreconfigitem(
2822 b'worker',
2828 b'worker',
2823 b'backgroundclosemaxqueue',
2829 b'backgroundclosemaxqueue',
2824 default=384,
2830 default=384,
2825 )
2831 )
2826 coreconfigitem(
2832 coreconfigitem(
2827 b'worker',
2833 b'worker',
2828 b'backgroundcloseminfilecount',
2834 b'backgroundcloseminfilecount',
2829 default=2048,
2835 default=2048,
2830 )
2836 )
2831 coreconfigitem(
2837 coreconfigitem(
2832 b'worker',
2838 b'worker',
2833 b'backgroundclosethreadcount',
2839 b'backgroundclosethreadcount',
2834 default=4,
2840 default=4,
2835 )
2841 )
2836 coreconfigitem(
2842 coreconfigitem(
2837 b'worker',
2843 b'worker',
2838 b'enabled',
2844 b'enabled',
2839 default=True,
2845 default=True,
2840 )
2846 )
2841 coreconfigitem(
2847 coreconfigitem(
2842 b'worker',
2848 b'worker',
2843 b'numcpus',
2849 b'numcpus',
2844 default=None,
2850 default=None,
2845 )
2851 )
2846
2852
# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem(
    b'commands',
    b'rebase.requiredest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'rebaseskipobsolete',
    default=True,
)
coreconfigitem(
    b'rebase',
    b'singletransaction',
    default=False,
)
coreconfigitem(
    b'rebase',
    b'experimental.inmemory',
    default=False,
)

# This setting controls creation of a rebase_source extra field
# during rebase. When False, no such field is created. This is
# useful e.g. for incrementally converting changesets and then
# rebasing them onto an existing repo.
# WARNING: this is an advanced setting reserved for people who know
# exactly what they are doing. Misuse of this setting can easily
# result in obsmarker cycles and a vivid headache.
coreconfigitem(
    b'rebase',
    b'store-source',
    default=True,
    experimental=True,
)
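
Aside (editorial, not part of this changeset): items registered with
coreconfigitem() supply the fallback values that ui.config(), ui.configbool()
and ui.configint() return when nothing is set in an hgrc. A minimal sketch of
an extension reading a few of the items registered above; the command name
'showlimits' is invented for illustration:

from mercurial import registrar

cmdtable = {}
command = registrar.command(cmdtable)


@command(b'showlimits', [], b'hg showlimits')
def showlimits(ui, repo):
    """print a few core config values and their registered defaults"""
    # each lookup falls back to the coreconfigitem() default when the
    # user has not configured the item in an hgrc
    limit = ui.configint(b'ui', b'large-file-limit')  # 10 * (2 ** 20)
    paginate = ui.configbool(b'ui', b'paginate')  # True
    style = ui.config(b'web', b'style')  # b'paper'
    ui.write(b'large-file-limit: %d\n' % limit)
    ui.write(b'paginate: %s\n' % (b'yes' if paginate else b'no'))
    ui.write(b'web.style: %s\n' % style)
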
@@ -1,299 +1,301 @@
# filelog.py - file history class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


from .i18n import _
from .node import nullrev
from . import (
    error,
    revlog,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .utils import storageutil
from .revlogutils import (
    constants as revlog_constants,
    rewrite,
)


@interfaceutil.implementer(repository.ifilestorage)
class filelog:
    def __init__(self, opener, path):
        self._revlog = revlog.revlog(
            opener,
            # XXX should use the unencoded path
            target=(revlog_constants.KIND_FILELOG, path),
            radix=b'/'.join((b'data', path)),
            censorable=True,
            canonical_parent_order=False,  # see comment in revlog.py
        )
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self._revlog.filename = path
        self.nullid = self._revlog.nullid
        opts = opener.options
        self._fix_issue6528 = opts.get(b'issue6528.fix-incoming', True)

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def hasnode(self, node):
        if node in (self.nullid, nullrev):
            return False

        try:
            self._revlog.rev(node)
            return True
        except (TypeError, ValueError, IndexError, error.LookupError):
            return False

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return storageutil.fileidlookup(
            self._revlog, node, self._revlog.display_id
        )

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    def revision(self, node, _df=None):
        return self._revlog.revision(node, _df=_df)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )

    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        cachedelta=None,
    ):
        return self._revlog.addrevision(
            revisiondata,
            transaction,
            linkrev,
            p1,
            p2,
            node=node,
            flags=flags,
            cachedelta=cachedelta,
        )

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
        debug_info=None,
    ):
        if maybemissingparents:
            raise error.Abort(
                _(
                    b'revlog storage does not support missing '
                    b'parents write mode'
                )
            )

        with self._revlog._writing(transaction):

            if self._fix_issue6528:
                deltas = rewrite.filter_delta_issue6528(self._revlog, deltas)

            return self._revlog.addgroup(
                deltas,
                linkmapper,
                transaction,
                addrevisioncb=addrevisioncb,
                duplicaterevisioncb=duplicaterevisioncb,
                debug_info=debug_info,
            )

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        return self._revlog.censorrevision(tr, node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)
        rev = self.addrevision(text, transaction, link, p1, p2)
        return self.node(rev)

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.iscensored(rev):
            return 0
        if self.renamed(node):
            return len(self.read(node))

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        # XXX See also basefilectx.cmp.
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        return not storageutil.filedataequivalent(self, node, text)
222
224
223 def verifyintegrity(self, state):
225 def verifyintegrity(self, state):
224 return self._revlog.verifyintegrity(state)
226 return self._revlog.verifyintegrity(state)
225
227
226 def storageinfo(
228 def storageinfo(
227 self,
229 self,
228 exclusivefiles=False,
230 exclusivefiles=False,
229 sharedfiles=False,
231 sharedfiles=False,
230 revisionscount=False,
232 revisionscount=False,
231 trackedsize=False,
233 trackedsize=False,
232 storedsize=False,
234 storedsize=False,
233 ):
235 ):
234 return self._revlog.storageinfo(
236 return self._revlog.storageinfo(
235 exclusivefiles=exclusivefiles,
237 exclusivefiles=exclusivefiles,
236 sharedfiles=sharedfiles,
238 sharedfiles=sharedfiles,
237 revisionscount=revisionscount,
239 revisionscount=revisionscount,
238 trackedsize=trackedsize,
240 trackedsize=trackedsize,
239 storedsize=storedsize,
241 storedsize=storedsize,
240 )
242 )
241
243
242 # Used by repo upgrade.
244 # Used by repo upgrade.
243 def clone(self, tr, destrevlog, **kwargs):
245 def clone(self, tr, destrevlog, **kwargs):
244 if not isinstance(destrevlog, filelog):
246 if not isinstance(destrevlog, filelog):
245 msg = b'expected filelog to clone(), not %r'
247 msg = b'expected filelog to clone(), not %r'
246 msg %= destrevlog
248 msg %= destrevlog
247 raise error.ProgrammingError(msg)
249 raise error.ProgrammingError(msg)
248
250
249 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
251 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
250
252
251
253
252 class narrowfilelog(filelog):
254 class narrowfilelog(filelog):
253 """Filelog variation to be used with narrow stores."""
255 """Filelog variation to be used with narrow stores."""
254
256
255 def __init__(self, opener, path, narrowmatch):
257 def __init__(self, opener, path, narrowmatch):
256 super(narrowfilelog, self).__init__(opener, path)
258 super(narrowfilelog, self).__init__(opener, path)
257 self._narrowmatch = narrowmatch
259 self._narrowmatch = narrowmatch
258
260
259 def renamed(self, node):
261 def renamed(self, node):
260 res = super(narrowfilelog, self).renamed(node)
262 res = super(narrowfilelog, self).renamed(node)
261
263
262 # Renames that come from outside the narrowspec are problematic
264 # Renames that come from outside the narrowspec are problematic
263 # because we may lack the base text for the rename. This can result
265 # because we may lack the base text for the rename. This can result
264 # in code attempting to walk the ancestry or compute a diff
266 # in code attempting to walk the ancestry or compute a diff
265 # encountering a missing revision. We address this by silently
267 # encountering a missing revision. We address this by silently
266 # removing rename metadata if the source file is outside the
268 # removing rename metadata if the source file is outside the
267 # narrow spec.
269 # narrow spec.
268 #
270 #
269 # A better solution would be to see if the base revision is available,
271 # A better solution would be to see if the base revision is available,
270 # rather than assuming it isn't.
272 # rather than assuming it isn't.
271 #
273 #
272 # An even better solution would be to teach all consumers of rename
274 # An even better solution would be to teach all consumers of rename
273 # metadata that the base revision may not be available.
275 # metadata that the base revision may not be available.
274 #
276 #
275 # TODO consider better ways of doing this.
277 # TODO consider better ways of doing this.
276 if res and not self._narrowmatch(res[0]):
278 if res and not self._narrowmatch(res[0]):
277 return None
279 return None
278
280
279 return res
281 return res
280
282
281 def size(self, rev):
283 def size(self, rev):
282 # Because we have a custom renamed() that may lie, we need to call
284 # Because we have a custom renamed() that may lie, we need to call
283 # the base renamed() to report accurate results.
285 # the base renamed() to report accurate results.
284 node = self.node(rev)
286 node = self.node(rev)
285 if super(narrowfilelog, self).renamed(node):
287 if super(narrowfilelog, self).renamed(node):
286 return len(self.read(node))
288 return len(self.read(node))
287 else:
289 else:
288 return super(narrowfilelog, self).size(rev)
290 return super(narrowfilelog, self).size(rev)
289
291
290 def cmp(self, node, text):
292 def cmp(self, node, text):
        # We don't call `super` because narrow parents can be buggy in case
        # of an ambiguous dirstate. Always take the slow path until there is
        # a better fix, see issue6150.

        # Censored files compare against the empty file.
        if self.iscensored(self.rev(node)):
            return text != b''

        return self.read(node) != text
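
Aside (editorial): the change above is a plain pass-through. filelog.addgroup()
gains a debug_info keyword and forwards it untouched to revlog.addgroup(), just
as emitrevisions() already forwards its own debug_info. The accumulator pattern
in isolation, sketched with invented names (FakeStore and collect_stats are
illustrative, not Mercurial APIs):

class FakeStore:
    """Stand-in backend that records one entry per applied delta."""

    def addgroup(self, deltas, debug_info=None):
        for delta in deltas:
            if debug_info is not None:
                # the real revlog records richer per-revision data; a byte
                # count is enough to show the shape of the mechanism
                debug_info.append({'delta-size': len(delta)})
        return len(deltas)


def collect_stats(store, deltas):
    # pass a list to opt in to the statistics, or None to skip the bookkeeping
    debug_info = []
    store.addgroup(deltas, debug_info=debug_info)
    total = sum(rec['delta-size'] for rec in debug_info)
    return len(debug_info), total


print(collect_stats(FakeStore(), [b'abcd', b'efghi']))  # prints (2, 9)
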
@@ -1,2370 +1,2372 @@
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import heapq
import itertools
import struct
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
)
from .pycompat import getattr
from . import (
    encoding,
    error,
    match as matchmod,
    mdiff,
    pathutil,
    policy,
    pycompat,
    revlog,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    constants as revlog_constants,
)

parsers = policy.importmod('parsers')
propertycache = util.propertycache

# Allow tests to more easily test the alternate path in manifestdict.fastdelta()
FASTDELTA_TEXTDIFF_THRESHOLD = 1000


def _parse(nodelen, data):
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1:] != b'\n':
        raise ValueError(b'Manifest did not end in a newline.')
    prev = None
    for l in data.splitlines():
        if prev is not None and prev > l:
            raise ValueError(b'Manifest lines not in sorted order.')
        prev = l
        f, n = l.split(b'\0')
        nl = len(n)
        flags = n[-1:]
        if flags in _manifestflags:
            n = n[:-1]
            nl -= 1
        else:
            flags = b''
        if nl != 2 * nodelen:
            raise ValueError(b'Invalid manifest line')

        yield f, bin(n), flags


def _text(it):
    files = []
    lines = []
    for f, n, fl in it:
        files.append(f)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))

    _checkforbidden(files)
    return b''.join(lines)
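
Aside (editorial): the format that _parse() and _text() round-trip is one line
per tracked file, "<path>\0<hex node><optional flag>\n", with the flag byte
drawn from _manifestflags below. A standalone sketch, using a fabricated path
and node:

import binascii

node = b'\x12' * 20  # fabricated 20-byte node, for illustration only
line = b'src/main.py\x00' + binascii.hexlify(node) + b'x\n'  # b'x' = executable

path, rest = line.rstrip(b'\n').split(b'\x00')
flags = rest[40:]  # b'x' (empty when the entry carries no flag)
assert binascii.unhexlify(rest[:40]) == node
assert (path, flags) == (b'src/main.py', b'x')
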


class lazymanifestiter:
    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data[0]
        self.pos += 1
        zeropos = data.find(b'\x00', pos)
        return data[pos:zeropos]

    __next__ = next


class lazymanifestiterentries:
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data
        zeropos = data.find(b'\x00', pos)
        nlpos = data.find(b'\n', pos)
        if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
            raise error.StorageError(b'Invalid manifest line')
        flags = data[nlpos - 1 : nlpos]
        if flags in _manifestflags:
            hlen = nlpos - zeropos - 2
        else:
            hlen = nlpos - zeropos - 1
            flags = b''
        if hlen != 2 * self.lm._nodelen:
            raise error.StorageError(b'Invalid manifest line')
        hashval = unhexlify(
            data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
        )
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)

    __next__ = next


def unhexlify(data, extra, pos, length):
    s = bin(data[pos : pos + length])
    if extra:
        s += chr(extra & 0xFF)
    return s


def _cmp(a, b):
    return (a > b) - (a < b)


_manifestflags = {b'', b'l', b't', b'x'}


class _lazymanifest:
    """A pure python manifest backed by a byte string. It is supplemented with
    internal lists as it is modified, until it is compacted back to a pure byte
    string.

    ``data`` is the initial manifest data.

    ``positions`` is a list of offsets, one per manifest entry. Positive
    values are offsets into ``data``, negative values are offsets into the
    ``extradata`` list. When an entry is removed, its offset is dropped from
    ``positions``. The values are encoded such that when walking the list and
    indexing into ``data`` or ``extradata`` as appropriate, the entries are
    sorted by filename.

    ``extradata`` is a list of (key, hash, flags) for entries that were added
    or modified since the manifest was created or compacted.
    """

    def __init__(
        self,
        nodelen,
        data,
        positions=None,
        extrainfo=None,
        extradata=None,
        hasremovals=False,
    ):
        self._nodelen = nodelen
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
            self.hasremovals = False
        else:
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data
            self.hasremovals = hasremovals

    def findlines(self, data):
        if not data:
            return []
        pos = data.find(b"\n")
        if pos == -1 or data[-1:] != b'\n':
            raise ValueError(b"Manifest did not end in a newline.")
        positions = [0]
        prev = data[: data.find(b'\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
            if nexts < prev:
                raise ValueError(b"Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find(b"\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        #   positive number is an index in 'data'
        #   negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        if pos >= 0:
            return self.data[pos : self.data.find(b'\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            raise TypeError(b"getitem: manifest keys must be a bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            return (data[1], data[2])
        zeropos = data.find(b'\x00', pos)
        nlpos = data.find(b'\n', zeropos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
            raise error.StorageError(b'Invalid manifest line')
        hlen = nlpos - zeropos - 1
        flags = data[nlpos - 1 : nlpos]
        if flags in _manifestflags:
            hlen -= 1
        else:
            flags = b''
        if hlen != 2 * self._nodelen:
            raise error.StorageError(b'Invalid manifest line')
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1 :]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
        if cur >= 0:
            # This does NOT unsort the list as far as the search functions are
            # concerned, as they only examine lines mapped by self.positions.
            self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
            self.hasremovals = True

    def __setitem__(self, key, value):
        if not isinstance(key, bytes):
            raise TypeError(b"setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError(
                b"Manifest values must be a tuple of (node, flags)."
            )
        hashval = value[0]
        if not isinstance(hashval, bytes) or len(hashval) not in (20, 32):
            raise TypeError(b"node must be a 20-byte or 32-byte byte string")
        flags = value[1]
        if not isinstance(flags, bytes) or len(flags) > 1:
325 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
325 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
326 needle, found = self.bsearch2(key)
326 needle, found = self.bsearch2(key)
327 if found:
327 if found:
328 # put the item
328 # put the item
329 pos = self.positions[needle]
329 pos = self.positions[needle]
330 if pos < 0:
330 if pos < 0:
331 self.extradata[-pos - 1] = (key, hashval, value[1])
331 self.extradata[-pos - 1] = (key, hashval, value[1])
332 else:
332 else:
333 # just don't bother
333 # just don't bother
334 self.extradata.append((key, hashval, value[1]))
334 self.extradata.append((key, hashval, value[1]))
335 self.positions[needle] = -len(self.extradata)
335 self.positions[needle] = -len(self.extradata)
336 else:
336 else:
337 # not found, put it in with extra positions
337 # not found, put it in with extra positions
338 self.extradata.append((key, hashval, value[1]))
338 self.extradata.append((key, hashval, value[1]))
339 self.positions = (
339 self.positions = (
340 self.positions[:needle]
340 self.positions[:needle]
341 + [-len(self.extradata)]
341 + [-len(self.extradata)]
342 + self.positions[needle:]
342 + self.positions[needle:]
343 )
343 )
344 self.extrainfo = (
344 self.extrainfo = (
345 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
345 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
346 )
346 )
347
347
348 def copy(self):
348 def copy(self):
349 # XXX call _compact like in C?
349 # XXX call _compact like in C?
350 return _lazymanifest(
350 return _lazymanifest(
351 self._nodelen,
351 self._nodelen,
352 self.data,
352 self.data,
353 self.positions,
353 self.positions,
354 self.extrainfo,
354 self.extrainfo,
355 self.extradata,
355 self.extradata,
356 self.hasremovals,
356 self.hasremovals,
357 )
357 )
358
358
359 def _compact(self):
359 def _compact(self):
360 # hopefully not called TOO often
360 # hopefully not called TOO often
361 if len(self.extradata) == 0 and not self.hasremovals:
361 if len(self.extradata) == 0 and not self.hasremovals:
362 return
362 return
363 l = []
363 l = []
364 i = 0
364 i = 0
365 offset = 0
365 offset = 0
366 self.extrainfo = [0] * len(self.positions)
366 self.extrainfo = [0] * len(self.positions)
367 while i < len(self.positions):
367 while i < len(self.positions):
368 if self.positions[i] >= 0:
368 if self.positions[i] >= 0:
369 cur = self.positions[i]
369 cur = self.positions[i]
370 last_cut = cur
370 last_cut = cur
371
371
372 # Collect all contiguous entries in the buffer at the current
372 # Collect all contiguous entries in the buffer at the current
373 # offset, breaking out only for added/modified items held in
373 # offset, breaking out only for added/modified items held in
374 # extradata, or a deleted line prior to the next position.
374 # extradata, or a deleted line prior to the next position.
375 while True:
375 while True:
376 self.positions[i] = offset
376 self.positions[i] = offset
377 i += 1
377 i += 1
378 if i == len(self.positions) or self.positions[i] < 0:
378 if i == len(self.positions) or self.positions[i] < 0:
379 break
379 break
380
380
381 # A removed file has no positions[] entry, but does have an
381 # A removed file has no positions[] entry, but does have an
382 # overwritten first byte. Break out and find the end of the
382 # overwritten first byte. Break out and find the end of the
383 # current good entry/entries if there is a removed file
383 # current good entry/entries if there is a removed file
384 # before the next position.
384 # before the next position.
385 if (
385 if (
386 self.hasremovals
386 self.hasremovals
387 and self.data.find(b'\n\x00', cur, self.positions[i])
387 and self.data.find(b'\n\x00', cur, self.positions[i])
388 != -1
388 != -1
389 ):
389 ):
390 break
390 break
391
391
392 offset += self.positions[i] - cur
392 offset += self.positions[i] - cur
393 cur = self.positions[i]
393 cur = self.positions[i]
394 end_cut = self.data.find(b'\n', cur)
394 end_cut = self.data.find(b'\n', cur)
395 if end_cut != -1:
395 if end_cut != -1:
396 end_cut += 1
396 end_cut += 1
397 offset += end_cut - cur
397 offset += end_cut - cur
398 l.append(self.data[last_cut:end_cut])
398 l.append(self.data[last_cut:end_cut])
399 else:
399 else:
400 while i < len(self.positions) and self.positions[i] < 0:
400 while i < len(self.positions) and self.positions[i] < 0:
401 cur = self.positions[i]
401 cur = self.positions[i]
402 t = self.extradata[-cur - 1]
402 t = self.extradata[-cur - 1]
403 l.append(self._pack(t))
403 l.append(self._pack(t))
404 self.positions[i] = offset
404 self.positions[i] = offset
405 # Hashes are either 20 bytes (old sha1s) or 32
405 # Hashes are either 20 bytes (old sha1s) or 32
406 # bytes (new non-sha1).
406 # bytes (new non-sha1).
407 hlen = 20
407 hlen = 20
408 if len(t[1]) > 25:
408 if len(t[1]) > 25:
409 hlen = 32
409 hlen = 32
410 if len(t[1]) > hlen:
410 if len(t[1]) > hlen:
411 self.extrainfo[i] = ord(t[1][hlen + 1])
411 self.extrainfo[i] = ord(t[1][hlen + 1])
412 offset += len(l[-1])
412 offset += len(l[-1])
413 i += 1
413 i += 1
414 self.data = b''.join(l)
414 self.data = b''.join(l)
415 self.hasremovals = False
415 self.hasremovals = False
416 self.extradata = []
416 self.extradata = []
417
417
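    # A minimal sketch (hypothetical values) of the bookkeeping _compact()
    # collapses: untouched entries hold a non-negative byte offset into
    # self.data, while entries staged by __setitem__ live in self.extradata
    # and are referenced by a negative position, -(index + 1):
    #
    #     positions = [0, -1, 37]              # middle entry is extradata[0]
    #     extradata = [(b"b.txt", node, b"")]
    #
    # _compact() re-packs the staged tuples into self.data and resets every
    # position to a plain offset.
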
    def _pack(self, d):
        n = d[1]
        assert len(n) in (20, 32)
        return d[0] + b'\x00' + hex(n) + d[2] + b'\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, b'')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, b''), (e2, flags)

        return diff

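    # Shape of the value diff() returns, assuming hypothetical nodes n1/n2:
    #
    #     {
    #         b'changed.py': ((n1, b''), (n2, b'')),
    #         b'only-here.py': ((n1, b''), (None, b'')),
    #         b'only-there.py': ((None, b''), (n2, b'x')),
    #     }
    #
    # With clean=True, files identical on both sides additionally map to None.
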
    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest(self._nodelen, b'')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c


try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass


@interfaceutil.implementer(repository.imanifestdict)
class manifestdict:
    def __init__(self, nodelen, data=b''):
        self._nodelen = nodelen
        self._lm = _lazymanifest(nodelen, data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        self._lm[key] = node, self.flags(key)

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match is not None:
            match = matchmod.badmatch(match, lambda path, msg: None)
            sm2 = set(m2.walk(match))
            return {f for f in self.walk(match) if f not in sm2}
        return {f for f in self if f not in m2}

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        """Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files."""
        files = match.files()
        return len(files) < 100 and (
            match.isexact()
            or (match.prefix() and all(fn in self for fn in files))
        )

    def walk(self, match):
        """Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        """
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                if fn in self:
                    yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict(self._nodelen)
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict(self._nodelen)
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        """Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        """
        if match:
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

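    # Hedged usage sketch: given two manifestdict instances m1 and m2 and an
    # optional matcher (all names here are hypothetical), a caller might do:
    #
    #     changed = m1.diff(m2, match=somematcher)
    #     for fn, ((n1, fl1), (n2, fl2)) in changed.items():
    #         ...  # n1 is None if fn only exists in m2, and vice versa
    #
    # Passing a matcher narrows both sides with _matches() before diffing, as
    # implemented above.
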
    def setflag(self, key, flag):
        if flag not in _manifestflags:
            raise TypeError(b"Invalid manifest flag set.")
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key):
        try:
            return self._lm[key][1]
        except KeyError:
            return b''

    def copy(self):
        c = manifestdict(self._nodelen)
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [b""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # start/end will either be the span of the existing item or
                # the insert point if it is missing
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _(b"failed to remove %s from manifest") % f
                        )
                    l = b""
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, b"".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, b"".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext)
            )

        return arraytext, deltatext

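    # Sketch of the encoding fastdelta() produces (under the small-change
    # path above): `delta` is a list of [start, end, replacement] edits
    # against `base`, and _addlistdelta() below serializes each edit as
    #
    #     struct.pack(b">lll", start, end, len(content)) + content
    #
    # which is the binary patch form handed to revlog's addrevision.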

def _msearch(m, s, lo=0, hi=None):
    """return a tuple (start, end) that says where to find s within m.

    If the string is found, m[start:end] is the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string"""

    def advance(i, c):
        while i < lenm and m[i : i + 1] != c:
            i += 1
        return i

    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1 : start] != b'\n':
            start -= 1
        end = advance(start, b'\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, b'\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, b'\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, b'\n')
        return (lo, end + 1)
    else:
        return (lo, lo)

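# Worked example for _msearch (nodes spelled as 40 hex digits, per the
# comments above). Searching a two-line manifest buffer:
#
#     m = b"a\x00" + b"11" * 20 + b"\n" + b"b\x00" + b"22" * 20 + b"\n"
#     _msearch(m, b"b")   # -> (43, 86): m[43:86] is the b"b" line
#     _msearch(m, b"ab")  # -> (43, 43): miss; insertion point before b"b"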

def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if b'\n' in f or b'\r' in f:
            raise error.StorageError(
                _(b"'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f)
            )


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = b"".join(
        struct.pack(b">lll", start, end, len(content)) + content
        for start, end, content in x
    )
    return deltatext, newaddlist


def _splittopdir(f):
    if b'/' in f:
        dir, subpath = f.split(b'/', 1)
        return dir + b'/', subpath
    else:
        return b'', f

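# For example, _splittopdir(b'a/b/c') -> (b'a/', b'b/c'), while
# _splittopdir(b'file.txt') -> (b'', b'file.txt').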

_noop = lambda s: None


@interfaceutil.implementer(repository.imanifestdict)
class treemanifest:
    def __init__(self, nodeconstants, dir=b'', text=b''):
        self._dir = dir
        self.nodeconstants = nodeconstants
        self._node = self.nodeconstants.nullid
        self._nodelen = self.nodeconstants.nodelen
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:

            def readsubtree(subdir, subm):
                raise AssertionError(
                    b'treemanifest constructor only accepts flat manifests'
                )

            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        return self._dir + path

    def _loadalllazy(self):
        selfdirs = self._dirs
        subpath = self._subpath
        for d, (node, readsubtree, docopy) in self._lazydirs.items():
            if docopy:
                selfdirs[d] = readsubtree(subpath(d), node).copy()
            else:
                selfdirs[d] = readsubtree(subpath(d), node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        v = self._lazydirs.get(d)
        if v:
            node, readsubtree, docopy = v
            if docopy:
                self._dirs[d] = readsubtree(self._subpath(d), node).copy()
            else:
                self._dirs[d] = readsubtree(self._subpath(d), node)
            del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        if not visit:
            return None
        if visit == b'all' or visit == b'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            loadlazy(k + b'/')
        return visit

    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The criteria are currently:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in t1._lazydirs.items():
            v2 = t2._lazydirs.get(d)
            if not v2 or v2[0] != v1[0]:
                toloadlazy.append(d)
        for d, v1 in t2._lazydirs.items():
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            t1._loadlazy(d)
            t2._loadlazy(d)

    def __len__(self):
        self._load()
        size = len(self._files)
        self._loadalllazy()
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def __nonzero__(self):
        # Faster than "__len__() != 0" since it avoids loading sub-manifests
        return not self._isempty()

    __bool__ = __nonzero__

    def _isempty(self):
        self._load()  # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (
            self._dirs and any(not m._isempty() for m in self._dirs.values())
        ):
            return False
        self._loadalllazy()
        return not self._dirs or all(m._isempty() for m in self._dirs.values())

    @encoding.strmethod
    def __repr__(self):
        return (
            b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
            % (
                self._dir,
                hex(self._node),
                bool(self._loadfunc is _noop),
                self._dirty,
                id(self),
            )
        )

    def dir(self):
        """The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory."""
        return self._dir

    def node(self):
        """The node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        """
        assert not self._dirty
        return self._node

    def setnode(self, node):
        self._node = node
        self._dirty = False

    def iterentries(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, b'')
            else:
                for x in n.iterentries():
                    yield x

    def items(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.items():
                    yield f, sn

    iteritems = items

    def iterkeys(self):
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p]:
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return False

            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return b''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._lazydirs or f in self._dirs:
                return b''
            return self._flags.get(f, b'')

    def find(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, b'')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(
                    self.nodeconstants, self._subpath(dir)
                )
            self._dirs[dir].__setitem__(subpath, n)
        else:
            # manifest nodes are either 20 bytes or 32 bytes,
            # depending on the hash in use. Assert this as historically
            # sometimes extra bytes were added.
            assert len(n) in (20, 32)
            self._files[f] = n
        self._dirty = True

    def _load(self):
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        if flags not in _manifestflags:
            raise TypeError(b"Invalid manifest flag set.")
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(
                    self.nodeconstants, self._subpath(dir)
                )
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        copy = treemanifest(self.nodeconstants, self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:

            def _copyfunc(s):
                self._load()
                s._lazydirs = {
                    d: (n, r, True) for d, (n, r, c) in self._lazydirs.items()
                }
                sdirs = s._dirs
                for d, v in self._dirs.items():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)

            if self._loadfunc is _noop:
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.filesnotin(m2)

        files = set()

        def _filesnotin(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in t1._dirs.items():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            self._loadlazy(topdir)
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        dirslash = dir + b'/'
        return dirslash in self._dirs or dirslash in self._lazydirs

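    # Hedged example: with files b'a/b/c.txt' and b'd.txt' present,
    # hasdir(b'a') and hasdir(b'a/b') are True (directory keys carry a
    # trailing b'/' internally, hence the `dir + b'/'` lookup above), while
    # hasdir(b'd.txt') is False because files are not directories.
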
    def walk(self, match):
        """Generates matching file names.

        It also reports nonexistent files by marking them bad with match.bad().
        """
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1])
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f

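    # Note on visitchildrenset, as used above (semantics assumed from this
    # file's usage): it returns b'all' (descend everywhere with no further
    # pruning), b'this' (examine this directory's own entries), a set of
    # child names worth visiting, or a falsy value meaning nothing under
    # this directory can match. _walk() and _matches_inner() both prune on
    # that answer before touching lazily-loaded submanifests.
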
    def _matches(self, match):
        """recursively generate a new manifest filtered by the match argument."""
        if match.always():
            return self.copy()
        return self._matches_inner(match)

    def _matches_inner(self, match):
        if match.always():
            return self.copy()

        visit = match.visitchildrenset(self._dir[:-1])
        if visit == b'all':
            return self.copy()
        ret = treemanifest(self.nodeconstants, self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != b'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.items():
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches_inner(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def fastdelta(self, base, changes):
        raise FastdeltaUnavailable()

    def diff(self, m2, match=None, clean=False):
        """Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        """
        if match and not match.always():
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest(self.nodeconstants)

        def _iterativediff(t1, t2, stack):
            """compare two tree manifests and append the sub-manifests that
            still need to be compared to the stack"""
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            for d, m1 in t1._dirs.items():
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in t2._dirs.items():
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            for fn, n1 in t1._files.items():
                fl1 = t1._flags.get(fn, b'')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, b'')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.items():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, b'')
                    result[t2._subpath(fn)] = ((None, b''), (n2, fl2))

        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result

    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        selflazy = self._lazydirs
        for f, n, fl in _parse(self._nodelen, text):
            if fl == b't':
                f = f + b'/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (n, readsubtree, False)
            elif b'/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

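    # Sketch of the on-disk form parse() consumes: a subdirectory entry uses
    # flag b't' and names the subtree's node, e.g. (with hypothetical nodes
    # subnode/filenode):
    #
    #     b"subdir\x00" + hex(subnode) + b"t\n"     # becomes a _lazydirs entry
    #     b"top.txt\x00" + hex(filenode) + b"\n"    # regular file entry
    #
    # The b't' entries are stored as (node, readsubtree, False) tuples until
    # _loadlazy()/_loadalllazy() materialize them.
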
1350 def text(self):
1350 def text(self):
1351 """Get the full data of this manifest as a bytestring."""
1351 """Get the full data of this manifest as a bytestring."""
1352 self._load()
1352 self._load()
1353 return _text(self.iterentries())
1353 return _text(self.iterentries())

    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        lazydirs = [(d[:-1], v[0], b't') for d, v in self._lazydirs.items()]
        dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))

    def read(self, gettext, readsubtree):
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False

        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree, match):
        self._load()  # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest(self.nodeconstants)

        def getnode(m, d):
            ld = m._lazydirs.get(d)
            if ld:
                return ld[0]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == b'this' or visit == b'all':
            visit = None
        for d, subm in self._dirs.items():
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == self.nodeconstants.nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)

    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1]):
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in self._dirs.items():
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree
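
    # Illustrative sketch (not part of the original module): walksubtrees()
    # yields this manifest first and then every nested treemanifest, so
    # counting the subtrees selected by a matcher could look like:
    #
    #     ndirs = sum(1 for _subtree in tm.walksubtrees(matcher=matcher))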


class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    _file = b'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        self._dirty = False
        self._read = False
        self._opener = None

    def read(self):
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    # TODO do we need to do work here for sha1 portability?
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack(b'>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False
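
    # Illustrative sketch (not part of the original class): each entry in the
    # cache file is `20-byte node + 4-byte big-endian length + payload`, so a
    # single record round-trips like this:
    #
    #     record = node + struct.pack(b'>L', len(text)) + text
    #     size = struct.unpack(b'>L', record[20:24])[0]
    #     assert record[24:24 + size] == text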

    def write(self):
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        try:
            with self._opener(
                self._file, b'w', atomictemp=True, checkambig=True
            ) as fp:
                node = self._head.prev
                while True:
                    if node.key in self._cache:
                        fp.write(node.key)
                        fp.write(struct.pack(b'>L', len(node.value)))
                        fp.write(node.value)
                    if node is self._head:
                        break
                    node = node.prev
        except IOError:
            # We could not write the cache (e.g. a permission error);
            # the content can be missing.
            #
            # We could try harder and see if we could recreate a wcache
            # directory where we could write to.
            #
            # XXX the error passes silently; having some way to issue an
            # error log via `ui.log` would be nice.
            pass

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False

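    # Illustrative usage sketch (not part of the original class); the opener
    # is normally wired up by manifestrevlog._setupmanifestcachehooks():
    #
    #     cache = manifestfulltextcache(4)
    #     cache._opener = vfs                # hypothetical vfs-style opener
    #     cache[node] = bytearray(mf_text)   # marks the cache dirty
    #     cache.write()                      # persists all entries to disk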

# an upper bound of what we expect from compression
# (the real-life value seems to be "3")
MAXCOMPRESSION = 3


class FastdeltaUnavailable(Exception):
    """Exception raised when fastdelta isn't usable on a manifest."""


@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog:
    """A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    """

    def __init__(
        self,
        nodeconstants,
        opener,
        tree=b'',
        dirlogcache=None,
        treemanifest=False,
    ):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flat manifests and tree manifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        self.nodeconstants = nodeconstants
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get(b'manifestcachesize', cachesize)
            optiontreemanifest = opts.get(b'treemanifest', False)

        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, (tree, b'opts is %r' % opts)

        radix = b'00manifest'
        if tree:
            radix = b"meta/" + tree + radix

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {b'': self}

        self._revlog = revlog.revlog(
            opener,
            target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
            radix=radix,
            # only root indexfile is cached
            checkambig=not bool(tree),
            mmaplargeindex=True,
            upperboundcomp=MAXCOMPRESSION,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
        )

        self.index = self._revlog.index
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, b'_wlockref'):
            return

        self._fulltextcache._opener = repo.wcachevfs
        if repo._currentlock(repo._wlockref) is None:
            return

        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache(success):
            # Repo is in an unknown state, do not persist.
            if not success:
                return

            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        repo._afterlock(persistmanifestcache)
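
    # Illustrative sketch (not part of the original class): the weak
    # references above keep the after-lock callback from pinning the repo or
    # this revlog alive. The bare pattern, with hypothetical names, is:
    #
    #     ref = weakref.ref(obj)
    #
    #     def callback(success):
    #         target = ref()
    #         if success and target is not None:
    #             target.flush()  # hypothetical persistence step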

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(
                self.nodeconstants,
                self.opener,
                d,
                self._dirlogcache,
                treemanifest=self._treeondisk,
            )
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(
        self,
        m,
        transaction,
        link,
        p1,
        p2,
        added,
        removed,
        readtree=None,
        match=None,
    ):
        """add some manifest entry into the manifest log

        input:

        m: the manifest dict we want to store
        transaction: the open transaction
        p1: manifest-node of p1
        p2: manifest-node of p2
        added: files added/changed compared to the parent
        removed: files removed compared to the parent

        tree manifest input:

        readtree: a function to read a subtree
        match: a filematcher for the subpart of the tree manifest
        """
        try:
            if p1 not in self.fulltextcache:
                raise FastdeltaUnavailable()
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge(
                [(x, False) for x in sorted(added)],
                [(x, True) for x in sorted(removed)],
            )

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            rev = self._revlog.addrevision(
                text, transaction, link, p1, p2, cachedelta
            )
            n = self._revlog.node(rev)
        except FastdeltaUnavailable:
            # The first parent manifest isn't already loaded or the
            # manifest implementation doesn't support fastdelta, so
            # we'll just encode a fulltext of the manifest and pass
            # that through to the revlog layer, and let it handle the
            # delta process.
            if self._treeondisk:
                assert readtree, b"readtree must be set for treemanifest writes"
                assert match, b"match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(
                    m, transaction, link, m1, m2, readtree, match=match
                )
                arraytext = None
            else:
                text = m.text()
                rev = self._revlog.addrevision(text, transaction, link, p1, p2)
                n = self._revlog.node(rev)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n
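
    # Illustrative sketch (not part of the original class): heapq.merge lets
    # fastdelta consume additions and removals as one sorted stream, e.g.:
    #
    #     import heapq
    #     work = heapq.merge(
    #         [(x, False) for x in sorted([b'a', b'c'])],
    #         [(x, True) for x in sorted([b'b'])],
    #     )
    #     assert list(work) == [(b'a', False), (b'b', True), (b'c', False)]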

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != b'' and (
            m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
        ):
            return m.node()

        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(
                subm,
                transaction,
                link,
                subp1,
                subp2,
                None,
                None,
                readtree=readtree,
                match=match,
            )

        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != b'':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            rev = self._revlog.addrevision(
                text, transaction, link, m1.node(), m2.node()
            )
            n = self._revlog.node(rev)

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None):
        return self._revlog.revision(node, _df=_df)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        debug_info=None,
    ):
        return self._revlog.addgroup(
            deltas,
            linkmapper,
            transaction,
            alwayscache=alwayscache,
            addrevisioncb=addrevisioncb,
            duplicaterevisioncb=duplicaterevisioncb,
            debug_info=debug_info,
        )

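    # Illustrative sketch (not part of the original class): `debug_info` is
    # passed straight through to the underlying revlog. A caller interested
    # in statistics about an unbundle could, roughly, do:
    #
    #     debug_info = []
    #     mfrevlog.addgroup(deltas, linkmapper, tr, debug_info=debug_info)
    #     # debug_info now holds whatever the revlog layer collected
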
    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError(b'expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles,
            sharedfiles=sharedfiles,
            revisionscount=revisionscount,
            trackedsize=trackedsize,
            storedsize=storedsize,
        )

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value


@interfaceutil.implementer(repository.imanifestlog)
class manifestlog:
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""

    def __init__(self, opener, repo, rootstore, narrowmatch):
        self.nodeconstants = repo.nodeconstants
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
            cachesize = opts.get(b'manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = narrowmatch

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[b''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get(b'', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
        the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1]):
                return excludeddirmanifestctx(self.nodeconstants, tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                    _(
                        b"cannot ask for manifest directory '%s' in a flat "
                        b"manifest"
                    )
                    % tree
                )
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, b'', node)
            else:
                m = manifestctx(self, node)

        if node != self.nodeconstants.nullid:
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m
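
    # Illustrative usage sketch (not part of the original class), assuming an
    # open `repo` object:
    #
    #     mfl = repo.manifestlog
    #     mctx = mfl[repo[b'tip'].manifestnode()]  # manifestctx or tree ctx
    #     mf = mctx.read()                         # path -> node mapping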

    def getstorage(self, tree):
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)

    def update_caches(self, transaction):
        return self._rootstore._revlog.update_caches(transaction=transaction)


@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx:
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        return self._storage().add(
            self._manifestdict,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            match=match,
        )


@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx:
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """

    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        if self._data is None:
            nc = self._manifestlog.nodeconstants
            if self._node == nc.nullid:
                self._data = manifestdict(nc.nodelen)
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(nc.nodelen, text)
        return self._data

    def readfast(self, shallow=False):
        """Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        """
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()
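
    # Illustrative sketch (not part of the original class): the readdelta()
    # shortcut above is only cheap when the revlog happened to store this
    # revision as a delta against one of its parents, i.e. roughly:
    #
    #     r = store.rev(node)
    #     cheap = store.deltaparent(r) in store.parentrevs(r)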

    def readdelta(self, shallow=False):
        """Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        """
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(store.nodeconstants.nodelen, d)

    def find(self, key):
        return self.read().find(key)


@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx:
    def __init__(self, manifestlog, dir=b''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest(manifestlog.nodeconstants)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()

        return self._storage().add(
            self._treemanifest,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            readtree=readtree,
            match=match,
        )


@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx:
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1]):
                return excludedmanifestrevlog(
                    self._manifestlog.nodeconstants, self._dir
                )
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        if self._data is None:
            store = self._storage()
            if self._node == self._manifestlog.nodeconstants.nullid:
                self._data = treemanifest(self._manifestlog.nodeconstants)
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)

                def gettext():
                    return store.revision(self._node)

                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()

                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(
                    self._manifestlog.nodeconstants, dir=self._dir, text=text
                )

        return self._data

    def node(self):
        return self._node

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        """Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        """
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
            return manifestdict(store.nodeconstants.nodelen, d)
        else:
            # Need to perform a slow delta
            r0 = store.deltaparent(store.rev(self._node))
            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
            m1 = self.read()
            md = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).items():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md
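
    # Illustrative sketch (not part of the original class): the slow path
    # above rebuilds the delta by diffing the delta-parent tree against this
    # one and keeping only the entries present on this side, roughly:
    #
    #     changed = {f: n1 for f, (_old, (n1, _fl1))
    #                in m0.diff(m1).items() if n1}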

    def readfast(self, shallow=False):
        """Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        """
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(
                store.nodeconstants.nodelen, store.revision(self._node)
            )
        else:
            return self.read()

    def find(self, key):
        return self.read().find(key)


class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """

    def __init__(self, nodeconstants, dir, node):
        super(excludeddir, self).__init__(nodeconstants, dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[b''] = node
        self._flags[b''] = b't'
2299
2301
2300 # Manifests outside the narrowspec should never be modified, so avoid
2302 # Manifests outside the narrowspec should never be modified, so avoid
2301 # copying. This makes a noticeable difference when there are very many
2303 # copying. This makes a noticeable difference when there are very many
2302 # directories outside the narrowspec. Also, it makes sense for the copy to
2304 # directories outside the narrowspec. Also, it makes sense for the copy to
2303 # be of the same type as the original, which would not happen with the
2305 # be of the same type as the original, which would not happen with the
2304 # super type's copy().
2306 # super type's copy().
2305 def copy(self):
2307 def copy(self):
2306 return self
2308 return self
2307
2309
2308
2310
2309 class excludeddirmanifestctx(treemanifestctx):
2311 class excludeddirmanifestctx(treemanifestctx):
2310 """context wrapper for excludeddir - see that docstring for rationale"""
2312 """context wrapper for excludeddir - see that docstring for rationale"""
2311
2313
2312 def __init__(self, nodeconstants, dir, node):
2314 def __init__(self, nodeconstants, dir, node):
2313 self.nodeconstants = nodeconstants
2315 self.nodeconstants = nodeconstants
2314 self._dir = dir
2316 self._dir = dir
2315 self._node = node
2317 self._node = node
2316
2318
2317 def read(self):
2319 def read(self):
2318 return excludeddir(self.nodeconstants, self._dir, self._node)
2320 return excludeddir(self.nodeconstants, self._dir, self._node)
2319
2321
2320 def readfast(self, shallow=False):
2322 def readfast(self, shallow=False):
2321 # special version of readfast since we don't have underlying storage
2323 # special version of readfast since we don't have underlying storage
2322 return self.read()
2324 return self.read()
2323
2325
2324 def write(self, *args):
2326 def write(self, *args):
2325 raise error.ProgrammingError(
2327 raise error.ProgrammingError(
2326 b'attempt to write manifest from excluded dir %s' % self._dir
2328 b'attempt to write manifest from excluded dir %s' % self._dir
2327 )
2329 )
2328
2330
2329
2331
2330 class excludedmanifestrevlog(manifestrevlog):
2332 class excludedmanifestrevlog(manifestrevlog):
2331 """Stand-in for excluded treemanifest revlogs.
2333 """Stand-in for excluded treemanifest revlogs.
2332
2334
2333 When narrowing is active on a treemanifest repository, we'll have
2335 When narrowing is active on a treemanifest repository, we'll have
2334 references to directories we can't see due to the revlog being
2336 references to directories we can't see due to the revlog being
2335 skipped. This class exists to conform to the manifestrevlog
2337 skipped. This class exists to conform to the manifestrevlog
2336 interface for those directories and proactively prevent writes to
2338 interface for those directories and proactively prevent writes to
2337 outside the narrowspec.
2339 outside the narrowspec.
2338 """
2340 """
2339
2341
2340 def __init__(self, nodeconstants, dir):
2342 def __init__(self, nodeconstants, dir):
2341 self.nodeconstants = nodeconstants
2343 self.nodeconstants = nodeconstants
2342 self._dir = dir
2344 self._dir = dir
2343
2345
2344 def __len__(self):
2346 def __len__(self):
2345 raise error.ProgrammingError(
2347 raise error.ProgrammingError(
2346 b'attempt to get length of excluded dir %s' % self._dir
2348 b'attempt to get length of excluded dir %s' % self._dir
2347 )
2349 )
2348
2350
2349 def rev(self, node):
2351 def rev(self, node):
2350 raise error.ProgrammingError(
2352 raise error.ProgrammingError(
2351 b'attempt to get rev from excluded dir %s' % self._dir
2353 b'attempt to get rev from excluded dir %s' % self._dir
2352 )
2354 )
2353
2355
2354 def linkrev(self, node):
2356 def linkrev(self, node):
2355 raise error.ProgrammingError(
2357 raise error.ProgrammingError(
2356 b'attempt to get linkrev from excluded dir %s' % self._dir
2358 b'attempt to get linkrev from excluded dir %s' % self._dir
2357 )
2359 )
2358
2360
2359 def node(self, rev):
2361 def node(self, rev):
2360 raise error.ProgrammingError(
2362 raise error.ProgrammingError(
2361 b'attempt to get node from excluded dir %s' % self._dir
2363 b'attempt to get node from excluded dir %s' % self._dir
2362 )
2364 )
2363
2365
2364 def add(self, *args, **kwargs):
2366 def add(self, *args, **kwargs):
2365 # We should never write entries in dirlogs outside the narrow clone.
2367 # We should never write entries in dirlogs outside the narrow clone.
2366 # However, the method still gets called from writesubtree() in
2368 # However, the method still gets called from writesubtree() in
2367 # _addtree(), so we need to handle it. We should possibly make that
2369 # _addtree(), so we need to handle it. We should possibly make that
2368 # avoid calling add() with a clean manifest (_dirty is always False
2370 # avoid calling add() with a clean manifest (_dirty is always False
2369 # in excludeddir instances).
2371 # in excludeddir instances).
2370 pass
2372 pass
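
The excluded* stand-ins above share one pattern: reads surface a placeholder whose node is known but whose contents are not, copies return the object itself, and any write fails loudly. A minimal, self-contained sketch of that pattern follows, with hypothetical names rather than Mercurial's API:

class ExcludedStandIn:
    """Placeholder for content whose node is known but whose data is not."""

    def __init__(self, node):
        self._node = node

    def read(self):
        # Surface something iterable so callers can detect conflicts,
        # without pretending to know the real contents.
        return {'': self._node}

    def copy(self):
        # Stand-ins are immutable by construction, so never copy.
        return self

    def write(self, *args):
        raise RuntimeError('attempt to write excluded content')


standin = ExcludedStandIn(node=b'\x12' * 20)
assert standin.copy() is standin
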
@@ -1,3352 +1,3354 @@
# revlog.py - storage back-end for mercurial
# coding: utf8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""


import binascii
import collections
import contextlib
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    ALL_KINDS,
    CHANGELOGV2,
    COMP_MODE_DEFAULT,
    COMP_MODE_INLINE,
    COMP_MODE_PLAIN,
    ENTRY_RANK,
    FEATURES_BY_VERSION,
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    INDEX_HEADER,
    KIND_CHANGELOG,
    KIND_FILELOG,
    RANK_UNKNOWN,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
    SUPPORTED_FLAGS,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_HASCOPIESINFO,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    revlogutils,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    docket as docketutil,
    flagutil,
    nodemap as nodemaputil,
    randomaccessfile,
    revlogv0,
    rewrite,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.

REVLOGV0
REVLOGV1
REVLOGV2
CHANGELOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072


# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False


def ellipsiswriteprocessor(rl, text):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
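
# Illustrative sketch (not part of the original revlog.py). A flag
# processor is a (read, write, raw) triple shaped like `ellipsisprocessor`
# above: the read/write halves take (revlog, text) and return a
# (text, validatehash) pair, while the raw half reports whether the stored
# rawtext can be hash-checked as-is. A hypothetical no-op processor:


def _noopreadprocessor(rl, text):
    return text, True


def _noopwriteprocessor(rl, text):
    return text, True


def _nooprawprocessor(rl, text):
    return True


_noopprocessor = (
    _noopreadprocessor,
    _noopwriteprocessor,
    _nooprawprocessor,
)
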

def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)


# True if a fast implementation for persistent-nodemap is available
#
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance considerations (and a
# wheelbarrow of other slowness sources)
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
    parsers, 'BaseIndexObject'
)


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta:
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem:
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


def parse_index_v1(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline)
    return index, cache


def parse_index_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
    return index, cache


def parse_index_cl_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
    return index, cache


if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    parse_index_v1_nodemap = None


def parse_index_v1_mixed(data, inline):
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache


# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF

FILE_TOO_SHORT_MSG = _(
    b'cannot read from revlog %s;'
    b' expected %d bytes from offset %d, data size is %d'
)

hexdigits = b'0123456789abcdefABCDEF'


class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.

    `concurrencychecker` is an optional function that receives 3 arguments: a
    file handle, a filename, and an expected position. It should check whether
    the current position in the file handle is valid, and log/warn/fail (by
    raising).

    See mercurial/revlogutils/constants.py for details about the content of an
    index entry.
    """

    _flagserrorclass = error.RevlogError

    def __init__(
        self,
        opener,
        target,
        radix,
        postfix=None,  # only exists for `tmpcensored` now
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
        concurrencychecker=None,
        trypending=False,
        canonical_parent_order=True,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        `target`: a (KIND, ID) tuple that identifies the content stored in
        this revlog. It helps the rest of the code to understand what the
        revlog is about without having to resort to heuristics and index
        filename analysis. Note that this must reliably be set by normal
        code, but that test, debug, or performance measurement code might
        not set this to an accurate value.
        """
        self.upperboundcomp = upperboundcomp

        self.radix = radix

        self._docket_file = None
        self._indexfile = None
        self._datafile = None
        self._sidedatafile = None
        self._nodemap_file = None
        self.postfix = postfix
        self._trypending = trypending
        self.opener = opener
        if persistentnodemap:
            self._nodemap_file = nodemaputil.get_nodemap_file(self)

        assert target[0] in ALL_KINDS
        assert len(target) == 2
        self.target = target
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self._debug_delta = False
        self.index = None
        self._docket = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._compengine = b'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self.hassidedata = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 3-tuple of file handles being used for active writing.
        self._writinghandles = None
        # prevent nesting of addgroup
        self._adding_group = None

        self._loadindex()

        self._concurrencychecker = concurrencychecker

        # parent order is supposed to be semantically irrelevant, so we
        # normally resort parents to ensure that the first parent is non-null,
        # if there is a non-null parent at all.
        # filelog abuses the parent order as a flag to mark some instances of
        # meta-encoded files, so allow it to disable this behavior.
        self.canonical_parent_order = canonical_parent_order

    def _init_opts(self):
        """process options (from above/config) to set up the associated default revlog mode

        These values might be affected when actually reading on-disk information.

        The relevant values are returned for use in _loadindex().

        * newversionflags:
            version header to use if we need to create a new revlog

        * mmapindexthreshold:
            minimal index size for starting to use mmap

        * force_nodemap:
            force the usage of a "development" version of the nodemap code
        """
        mmapindexthreshold = None
        opts = self.opener.options

        if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
            new_header = CHANGELOGV2
        elif b'revlogv2' in opts:
            new_header = REVLOGV2
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'debug-delta' in opts:
            self._debug_delta = opts[b'debug-delta']
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).items():
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap
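
    # Illustrative sketch (not part of the original revlog.py). The
    # chunk-cache-size validation above relies on a classic bit trick: for
    # x > 0, `x & (x - 1)` clears the lowest set bit, so the result is zero
    # exactly when x is a power of two (the default 65536 passes, 65535
    # does not).
    @staticmethod
    def _is_power_of_two(x):
        return x > 0 and (x & (x - 1)) == 0
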

    def _get_data(self, filepath, mmap_threshold, size=None):
        """return a file content with or without mmap

        If the file is missing return the empty string"""
        try:
            with self.opener(filepath) as fp:
                if mmap_threshold is not None:
                    file_size = self.opener.fstat(fp).st_size
                    if file_size >= mmap_threshold:
                        if size is not None:
                            # avoid potential mmap crash
                            size = min(file_size, size)
                        # TODO: should .close() to release resources without
                        # relying on Python GC
                        if size is None:
                            return util.buffer(util.mmapread(fp))
                        else:
                            return util.buffer(util.mmapread(fp, size))
                if size is None:
                    return fp.read()
                else:
                    return fp.read(size)
        except FileNotFoundError:
            return b''
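
    # Illustrative sketch (not part of the original revlog.py). _get_data
    # above switches to mmap only once the file crosses the configured
    # threshold, and clamps an explicit `size` to the real file size so the
    # mapping cannot run past end-of-file. A standalone equivalent, assuming
    # plain filesystem access instead of the vfs opener:
    @staticmethod
    def _read_possibly_mmapped(path, threshold):
        import mmap

        with open(path, 'rb') as fp:
            if (
                threshold is not None
                and os.fstat(fp.fileno()).st_size >= threshold
            ):
                # read-only mapping; remains valid after the fd is closed
                return mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
            return fp.read()
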

    def _loadindex(self, docket=None):

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        else:
            entry_point = b'%s.i' % self.radix

        if docket is not None:
            self._docket = docket
            self._docket_file = entry_point
        else:
            self._initempty = True
            entry_data = self._get_data(entry_point, mmapindexthreshold)
            if len(entry_data) > 0:
                header = INDEX_HEADER.unpack(entry_data[:4])[0]
                self._initempty = False
            else:
                header = new_header

            self._format_flags = header & ~0xFFFF
            self._format_version = header & 0xFFFF

            supported_flags = SUPPORTED_FLAGS.get(self._format_version)
            if supported_flags is None:
                msg = _(b'unknown version (%d) in revlog %s')
                msg %= (self._format_version, self.display_id)
                raise error.RevlogError(msg)
            elif self._format_flags & ~supported_flags:
                msg = _(b'unknown flags (%#04x) in version %d revlog %s')
                display_flag = self._format_flags >> 16
                msg %= (display_flag, self._format_version, self.display_id)
                raise error.RevlogError(msg)

            features = FEATURES_BY_VERSION[self._format_version]
            self._inline = features[b'inline'](self._format_flags)
            self._generaldelta = features[b'generaldelta'](self._format_flags)
            self.hassidedata = features[b'sidedata']

            if not features[b'docket']:
                self._indexfile = entry_point
                index_data = entry_data
            else:
                self._docket_file = entry_point
                if self._initempty:
                    self._docket = docketutil.default_docket(self, header)
                else:
                    self._docket = docketutil.parse_docket(
                        self, entry_data, use_pending=self._trypending
                    )

        if self._docket is not None:
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self._docket is not None:
            self._datafile = self._docket.data_filepath()
            self._sidedatafile = self._docket.sidedata_filepath()
        elif self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index = index
        self._segmentfile = randomaccessfile.randomaccessfile(
            self.opener,
            (self._indexfile if self._inline else self._datafile),
            self._chunkcachesize,
            chunkcache,
        )
        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
            self.opener,
            self._sidedatafile,
            self._chunkcachesize,
        )
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}
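
    # Illustrative sketch (not part of the original revlog.py). The 32-bit
    # header word parsed in _loadindex packs the format flags in the high
    # 16 bits and the format version in the low 16 bits; for example,
    # REVLOGV1 | FLAG_INLINE_DATA | FLAG_GENERALDELTA splits back into
    # (FLAG_INLINE_DATA | FLAG_GENERALDELTA, REVLOGV1).
    @staticmethod
    def _split_header(header):
        return header & ~0xFFFF, header & 0xFFFF
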

    @util.propertycache
    def revlog_kind(self):
        return self.target[0]

    @util.propertycache
    def display_id(self):
        """The public facing "ID" of the revlog that we use in messages"""
        if self.revlog_kind == KIND_FILELOG:
            # Reference the file without the "data/" prefix, so it is familiar
            # to the user.
            return self.target[1]
        else:
            return self.radix

    def _get_decompressor(self, t):
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(
                    _(b'unknown compression type %s') % binascii.hexlify(t)
                )
        return compressor

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    @util.propertycache
    def _decompressor(self):
        """the default decompressor"""
        if self._docket is None:
            return None
        t = self._docket.default_compression_header
        c = self._get_decompressor(t)
        return c.decompress

    def _indexfp(self):
        """file object for the revlog's index file"""
        return self.opener(self._indexfile, mode=b"r")

    def __index_write_fp(self):
        # You should not use this directly and use `_writing` instead
        try:
            f = self.opener(
                self._indexfile, mode=b"r+", checkambig=self._checkambig
            )
            if self._docket is None:
                f.seek(0, os.SEEK_END)
            else:
                f.seek(self._docket.index_end, os.SEEK_SET)
            return f
        except FileNotFoundError:
            return self.opener(
                self._indexfile, mode=b"w+", checkambig=self._checkambig
            )

    def __index_new_fp(self):
        # You should not use this unless you are upgrading from inline revlog
        return self.opener(
            self._indexfile,
            mode=b"w",
            checkambig=self._checkambig,
            atomictemp=True,
        )

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self._datafile, mode=mode)

    @contextlib.contextmanager
    def _sidedatareadfp(self):
        """file object suitable to read sidedata"""
        if self._writinghandles:
            yield self._writinghandles[2]
        else:
            with self.opener(self._sidedatafile) as fp:
                yield fp

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and
        # two clients could have the same revlog node with different flags
        # (i.e. different rawtext contents) and the delta could be
        # incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def update_caches(self, transaction):
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket,
        # so we end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def sidedata_cut_off(self, rev):
        sd_cut_off = self.index[rev][8]
        if sd_cut_off != 0:
            return sd_cut_off
        # This is some annoying dance, because entries without sidedata
        # currently use 0 as their offset (instead of previous-offset +
        # previous-size).
        #
        # We should reconsider this sidedata → 0 sidedata_offset policy.
        # In the meantime, we need this.
        while 0 <= rev:
            e = self.index[rev]
            if e[9] != 0:
                return e[8] + e[9]
            rev -= 1
        return 0

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
840
840
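    # Illustrative sketch (editorial): the first index-tuple entry packs a
    # 48-bit data offset and 16 bits of flags into a single integer, which
    # is what ``start`` and ``flags`` unpack:
    #
    #     entry0 = (offset << 16) | flags  # how the value is packed
    #     offset = entry0 >> 16            # what ``start(rev)`` returns
    #     flags = entry0 & 0xFFFF          # what ``flags(rev)`` returns
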
    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.hassidedata:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev))

    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
        """
        rank = self.index[rev][ENTRY_RANK]
        if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
            return None
        if rev == nullrev:
            return 0  # convention
        return rank

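    # Illustrative sketch (editorial): the rank is |ancestors(r)| with ``r``
    # itself included, so for a linear history 0 <- 1 <- 2 one would expect,
    # assuming a changelog-v2 revlog that persists ranks:
    #
    #     rl.fast_rank(0) == 1  # just revision 0 itself
    #     rl.fast_rank(2) == 3  # revisions 0, 1 and 2
    #     # and None whenever the format does not store the rank
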
    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

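    # Illustrative sketch (editorial): entry[3] is the delta base of a
    # revision; ``chainbase`` follows that field until it reaches a
    # self-referencing entry, which marks a full snapshot. With bases
    # 2 -> 1 -> 0 and index[0][3] == 0:
    #
    #     rl.chainbase(2) == 0  # the chain bottoms out at revision 0
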
    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        if self.canonical_parent_order and entry[5] == nullrev:
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

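    # Illustrative sketch (editorial): with ``canonical_parent_order`` set, a
    # null first parent is swapped behind the real one, so callers always see
    # the non-null parent first. For an index entry whose stored parents are
    # (nullrev, 3):
    #
    #     rl.parentrevs(rev) == (3, nullrev)  # canonical order
    #     # without the option, the raw (nullrev, 3) would be returned
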
    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
        if self.canonical_parent_order and d[5] == nullrev:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

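    # Illustrative sketch (editorial): for a chain where revision 4 deltas
    # against 3, 3 against 1, and 1 is a snapshot (its base is itself):
    #
    #     rl._deltachain(4) == ([1, 3, 4], False)
    #     rl._deltachain(4, stoprev=3) == ([4], True)
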
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None and self.index.rust_ext_compat:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset:
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

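    # Illustrative sketch (editorial): in revset terms the return value is
    # the pair (::common, (::heads) - (::common)). For a linear history
    # 0 <- 1 <- 2 with common at revision 1 and heads at revision 2:
    #
    #     has, missing = rl.findcommonmissing([rl.node(1)], [rl.node(2)])
    #     # has contains revs 0 and 1 (plus nullrev)
    #     # missing == [rl.node(2)]
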
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None and self.index.rust_ext_compat:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

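    # Illustrative sketch (editorial): ``findmissing`` is the node-level
    # counterpart of ``findmissingrevs``. For a linear history 0 <- 1 <- 2:
    #
    #     rl.findmissingrevs(common=[0], heads=[2])   # -> [1, 2]
    #     rl.findmissing([rl.node(0)], [rl.node(2)])  # -> their node ids
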
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.items() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

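    # Illustrative sketch (editorial): for a linear history 0 <- 1 <- 2, the
    # path between revision 0 and revision 2 contains every node on the way,
    # plus the surviving roots and heads:
    #
    #     nodes, outroots, outheads = rl.nodesbetween(
    #         [rl.node(0)], [rl.node(2)]
    #     )
    #     # nodes == [rl.node(0), rl.node(1), rl.node(2)]
    #     # outroots == [rl.node(0)], outheads == [rl.node(2)]
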
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None and self.index.rust_ext_compat:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

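    # Illustrative sketch (editorial): the pure-Python fallback marks every
    # revision as a candidate head, then clears the mark from anything named
    # as a parent. For 0 <- 1 <- 2 plus a branch 0 <- 3:
    #
    #     rl._headrevs() == [2, 3]  # 0 and 1 are parents, so not heads
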
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

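    # Illustrative sketch (editorial): the cheap checks settle the trivial
    # cases (nullrev is everyone's ancestor; a higher rev can never be the
    # ancestor of a lower one), and only then is the graph walked:
    #
    #     rl.isancestorrev(0, 2)  # True for a linear 0 <- 1 <- 2
    #     rl.isancestorrev(2, 0)  # False without any graph walk: 2 > 0
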
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

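    # Illustrative sketch (editorial): when two revisions have several
    # equally good common ancestors, taking ``min`` over the node ids picks
    # a deterministic winner, so repeated calls agree:
    #
    #     anc = rl.ancestor(n1, n2)  # same answer on every call
    #     # rl.nullid is returned when no common ancestor exists
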
    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (binascii.Error, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be
        # full hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        ambiguous = False
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    ambiguous = True
                else:
                    return partial
            elif maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            else:
                return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                ambiguous = True
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key is not hex
            pass
        if ambiguous:
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            # hex(node)[:...]
            l = len(id) // 2 * 2  # grab an even number of digits
            try:
                # we're dropping the last digit, so let's check that it's hex,
                # to avoid the expensive computation below if it's not
                if len(id) % 2 > 0:
                    if not (id[-1] in hexdigits):
                        return None
                prefix = bin(id[:l])
            except binascii.Error:
                pass
            else:
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

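    # Illustrative sketch (editorial): ``lookup`` tries exact matches first
    # (revision number, binary node, full hex) and only then unique hex
    # prefixes; assuming the same hypothetical revlog instance ``rl``:
    #
    #     rl.lookup(0)                    # by revision number
    #     rl.lookup(b'0')                 # by stringified revision number
    #     rl.lookup(hex(rl.node(0))[:8])  # by unambiguous hex prefix
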
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

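    # Illustrative sketch (editorial): the shortest prefix must be
    # unambiguous against the other nodes and against the virtual
    # working-directory id ('ffff...'), so an all-'f' prefix is extended:
    #
    #     p = rl.shortest(rl.node(0))           # unique prefix of the hash
    #     assert hex(rl.node(0)).startswith(p)
    #     rl.shortest(rl.node(0), minlength=8)  # at least 8 hex digits
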
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

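    # Illustrative sketch (editorial): the comparison never decompresses the
    # stored text; it recomputes the node id from the candidate text and the
    # stored parents. For SHA-1 hashing this is, in essence:
    #
    #     hashlib.sha1(min(p1, p2) + max(p1, p2) + text).digest() != node
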
    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._segmentfile.read_chunk(start, length, df)

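    # Illustrative sketch (editorial): in an inline revlog the index entries
    # and the data chunks are interleaved in a single file, so a data offset
    # must be shifted past the (rev + 1) index entries preceding it:
    #
    #     physical = rl.start(rev) + (rev + 1) * rl.index.entry_size
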
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        compression_mode = self.index[rev][10]
        data = self._getsegmentforrevs(rev, rev, df=df)[1]
        if compression_mode == COMP_MODE_PLAIN:
            return data
        elif compression_mode == COMP_MODE_DEFAULT:
            return self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            return self.decompress(data)
        else:
            msg = b'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)

1686 def _chunks(self, revs, df=None, targetsize=None):
1686 def _chunks(self, revs, df=None, targetsize=None):
1687 """Obtain decompressed chunks for the specified revisions.
1687 """Obtain decompressed chunks for the specified revisions.
1688
1688
1689 Accepts an iterable of numeric revisions that are assumed to be in
1689 Accepts an iterable of numeric revisions that are assumed to be in
1690 ascending order. Also accepts an optional already-open file handle
1690 ascending order. Also accepts an optional already-open file handle
1691 to be used for reading. If used, the seek position of the file will
1691 to be used for reading. If used, the seek position of the file will
1692 not be preserved.
1692 not be preserved.
1693
1693
1694 This function is similar to calling ``self._chunk()`` multiple times,
1694 This function is similar to calling ``self._chunk()`` multiple times,
1695 but is faster.
1695 but is faster.
1696
1696
1697 Returns a list with decompressed data for each requested revision.
1697 Returns a list with decompressed data for each requested revision.
1698 """
1698 """
1699 if not revs:
1699 if not revs:
1700 return []
1700 return []
1701 start = self.start
1701 start = self.start
1702 length = self.length
1702 length = self.length
1703 inline = self._inline
1703 inline = self._inline
1704 iosize = self.index.entry_size
1704 iosize = self.index.entry_size
1705 buffer = util.buffer
1705 buffer = util.buffer
1706
1706
1707 l = []
1707 l = []
1708 ladd = l.append
1708 ladd = l.append
1709
1709
1710 if not self._withsparseread:
1710 if not self._withsparseread:
1711 slicedchunks = (revs,)
1711 slicedchunks = (revs,)
1712 else:
1712 else:
1713 slicedchunks = deltautil.slicechunk(
1713 slicedchunks = deltautil.slicechunk(
1714 self, revs, targetsize=targetsize
1714 self, revs, targetsize=targetsize
1715 )
1715 )
1716
1716
1717 for revschunk in slicedchunks:
1717 for revschunk in slicedchunks:
1718 firstrev = revschunk[0]
1718 firstrev = revschunk[0]
1719 # Skip trailing revisions with empty diff
1719 # Skip trailing revisions with empty diff
1720 for lastrev in revschunk[::-1]:
1720 for lastrev in revschunk[::-1]:
1721 if length(lastrev) != 0:
1721 if length(lastrev) != 0:
1722 break
1722 break
1723
1723
1724 try:
1724 try:
1725 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1725 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1726 except OverflowError:
1726 except OverflowError:
1727 # issue4215 - we can't cache a run of chunks greater than
1727 # issue4215 - we can't cache a run of chunks greater than
1728 # 2G on Windows
1728 # 2G on Windows
1729 return [self._chunk(rev, df=df) for rev in revschunk]
1729 return [self._chunk(rev, df=df) for rev in revschunk]
1730
1730
1731 decomp = self.decompress
1731 decomp = self.decompress
1732 # self._decompressor might be None, but will not be used in that case
1732 # self._decompressor might be None, but will not be used in that case
1733 def_decomp = self._decompressor
1733 def_decomp = self._decompressor
1734 for rev in revschunk:
1734 for rev in revschunk:
1735 chunkstart = start(rev)
1735 chunkstart = start(rev)
1736 if inline:
1736 if inline:
1737 chunkstart += (rev + 1) * iosize
1737 chunkstart += (rev + 1) * iosize
1738 chunklength = length(rev)
1738 chunklength = length(rev)
1739 comp_mode = self.index[rev][10]
1739 comp_mode = self.index[rev][10]
1740 c = buffer(data, chunkstart - offset, chunklength)
1740 c = buffer(data, chunkstart - offset, chunklength)
1741 if comp_mode == COMP_MODE_PLAIN:
1741 if comp_mode == COMP_MODE_PLAIN:
1742 ladd(c)
1742 ladd(c)
1743 elif comp_mode == COMP_MODE_INLINE:
1743 elif comp_mode == COMP_MODE_INLINE:
1744 ladd(decomp(c))
1744 ladd(decomp(c))
1745 elif comp_mode == COMP_MODE_DEFAULT:
1745 elif comp_mode == COMP_MODE_DEFAULT:
1746 ladd(def_decomp(c))
1746 ladd(def_decomp(c))
1747 else:
1747 else:
1748 msg = b'unknown compression mode %d'
1748 msg = b'unknown compression mode %d'
1749 msg %= comp_mode
1749 msg %= comp_mode
1750 raise error.RevlogError(msg)
1750 raise error.RevlogError(msg)
1751
1751
1752 return l
1752 return l
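The win in _chunks comes from reading one contiguous segment covering a run of revisions and then slicing each revision's chunk out of it with zero-copy buffers; for inline revlogs the chunk's file position must additionally skip the index entries interleaved before it. A toy sketch of the slicing arithmetic, with made-up offsets and an assumed entry size:

# Toy model: rev i occupies (starts[i], lengths[i]) in a flat data stream.
starts = [0, 5, 9]
lengths = [5, 4, 7]
segment_offset = starts[0]               # file offset where the read began
segment = b'aaaaabbbbccccccc'            # one read covering revs 0..2

chunks = []
for rev in range(3):
    begin = starts[rev] - segment_offset  # position inside the segment
    chunks.append(memoryview(segment)[begin:begin + lengths[rev]])

assert bytes(chunks[1]) == b'bbbb'

# For an inline revlog, index entries and data interleave, so the real
# file position of a chunk is shifted by the entries written before it:
entry_size = 64                           # assumption for illustration
rev = 2
inline_chunkstart = starts[rev] + (rev + 1) * entry_size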
1753
1753
1754 def deltaparent(self, rev):
1754 def deltaparent(self, rev):
1755 """return deltaparent of the given revision"""
1755 """return deltaparent of the given revision"""
1756 base = self.index[rev][3]
1756 base = self.index[rev][3]
1757 if base == rev:
1757 if base == rev:
1758 return nullrev
1758 return nullrev
1759 elif self._generaldelta:
1759 elif self._generaldelta:
1760 return base
1760 return base
1761 else:
1761 else:
1762 return rev - 1
1762 return rev - 1
1763
1763
1764 def issnapshot(self, rev):
1764 def issnapshot(self, rev):
1765 """tells whether rev is a snapshot"""
1765 """tells whether rev is a snapshot"""
1766 if not self._sparserevlog:
1766 if not self._sparserevlog:
1767 return self.deltaparent(rev) == nullrev
1767 return self.deltaparent(rev) == nullrev
1768 elif util.safehasattr(self.index, b'issnapshot'):
1768 elif util.safehasattr(self.index, b'issnapshot'):
1769 # directly assign the method to cache the hasattr test and lookup
1769 # directly assign the method to cache the hasattr test and lookup
1770 self.issnapshot = self.index.issnapshot
1770 self.issnapshot = self.index.issnapshot
1771 return self.issnapshot(rev)
1771 return self.issnapshot(rev)
1772 if rev == nullrev:
1772 if rev == nullrev:
1773 return True
1773 return True
1774 entry = self.index[rev]
1774 entry = self.index[rev]
1775 base = entry[3]
1775 base = entry[3]
1776 if base == rev:
1776 if base == rev:
1777 return True
1777 return True
1778 if base == nullrev:
1778 if base == nullrev:
1779 return True
1779 return True
1780 p1 = entry[5]
1780 p1 = entry[5]
1781 while self.length(p1) == 0:
1781 while self.length(p1) == 0:
1782 b = self.deltaparent(p1)
1782 b = self.deltaparent(p1)
1783 if b == p1:
1783 if b == p1:
1784 break
1784 break
1785 p1 = b
1785 p1 = b
1786 p2 = entry[6]
1786 p2 = entry[6]
1787 while self.length(p2) == 0:
1787 while self.length(p2) == 0:
1788 b = self.deltaparent(p2)
1788 b = self.deltaparent(p2)
1789 if b == p2:
1789 if b == p2:
1790 break
1790 break
1791 p2 = b
1791 p2 = b
1792 if base == p1 or base == p2:
1792 if base == p1 or base == p2:
1793 return False
1793 return False
1794 return self.issnapshot(base)
1794 return self.issnapshot(base)
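In prose: a revision is a snapshot when its delta base is null or itself, is a plain delta when its base is one of its parents, and is otherwise an intermediate snapshot if its base is itself a snapshot. A compact sketch over a toy index, assuming each entry is (deltabase, p1, p2) with -1 for null; the empty-diff parent skipping done above is omitted for brevity:

NULLREV = -1

# Toy index: rev -> (delta base, p1, p2)
index = {
    0: (0, NULLREV, NULLREV),  # full snapshot (base == rev)
    1: (0, 0, NULLREV),        # delta against its parent: not a snapshot
    2: (0, 1, NULLREV),        # delta against a non-parent snapshot:
                               # an intermediate snapshot
}

def issnapshot(rev):
    if rev == NULLREV:
        return True
    base, p1, p2 = index[rev]
    if base == rev or base == NULLREV:
        return True
    if base in (p1, p2):       # plain delta against a parent
        return False
    return issnapshot(base)    # follow the snapshot chain

assert [issnapshot(r) for r in (0, 1, 2)] == [True, False, True]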

1795
1795
1796 def snapshotdepth(self, rev):
1796 def snapshotdepth(self, rev):
1797 """number of snapshot in the chain before this one"""
1797 """number of snapshot in the chain before this one"""
1798 if not self.issnapshot(rev):
1798 if not self.issnapshot(rev):
1799 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1799 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1800 return len(self._deltachain(rev)[0]) - 1
1800 return len(self._deltachain(rev)[0]) - 1
1801
1801
1802 def revdiff(self, rev1, rev2):
1802 def revdiff(self, rev1, rev2):
1803 """return or calculate a delta between two revisions
1803 """return or calculate a delta between two revisions
1804
1804
1805 The delta calculated is in binary form and is intended to be written to
1805 The delta calculated is in binary form and is intended to be written to
1806 revlog data directly. So this function needs raw revision data.
1806 revlog data directly. So this function needs raw revision data.
1807 """
1807 """
1808 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1808 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1809 return bytes(self._chunk(rev2))
1809 return bytes(self._chunk(rev2))
1810
1810
1811 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1811 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1812
1812
1813 def revision(self, nodeorrev, _df=None):
1813 def revision(self, nodeorrev, _df=None):
1814 """return an uncompressed revision of a given node or revision
1814 """return an uncompressed revision of a given node or revision
1815 number.
1815 number.
1816
1816
1817 _df - an existing file handle to read from. (internal-only)
1817 _df - an existing file handle to read from. (internal-only)
1818 """
1818 """
1819 return self._revisiondata(nodeorrev, _df)
1819 return self._revisiondata(nodeorrev, _df)
1820
1820
1821 def sidedata(self, nodeorrev, _df=None):
1821 def sidedata(self, nodeorrev, _df=None):
1822 """a map of extra data related to the changeset but not part of the hash
1822 """a map of extra data related to the changeset but not part of the hash
1823
1823
1824 This function currently returns a dictionary. However, a more advanced
1824 This function currently returns a dictionary. However, a more advanced
1825 mapping object will likely be used in the future for more
1825 mapping object will likely be used in the future for more
1826 efficient/lazy code.
1826 efficient/lazy code.
1827 """
1827 """
1828 # deal with <nodeorrev> argument type
1828 # deal with <nodeorrev> argument type
1829 if isinstance(nodeorrev, int):
1829 if isinstance(nodeorrev, int):
1830 rev = nodeorrev
1830 rev = nodeorrev
1831 else:
1831 else:
1832 rev = self.rev(nodeorrev)
1832 rev = self.rev(nodeorrev)
1833 return self._sidedata(rev)
1833 return self._sidedata(rev)
1834
1834
1835 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1835 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1836 # deal with <nodeorrev> argument type
1836 # deal with <nodeorrev> argument type
1837 if isinstance(nodeorrev, int):
1837 if isinstance(nodeorrev, int):
1838 rev = nodeorrev
1838 rev = nodeorrev
1839 node = self.node(rev)
1839 node = self.node(rev)
1840 else:
1840 else:
1841 node = nodeorrev
1841 node = nodeorrev
1842 rev = None
1842 rev = None
1843
1843
1844 # fast path the special `nullid` rev
1844 # fast path the special `nullid` rev
1845 if node == self.nullid:
1845 if node == self.nullid:
1846 return b""
1846 return b""
1847
1847
1848 # ``rawtext`` is the text as stored inside the revlog. Might be the
1848 # ``rawtext`` is the text as stored inside the revlog. Might be the
1849 # revision or might need to be processed to retrieve the revision.
1849 # revision or might need to be processed to retrieve the revision.
1850 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1850 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1851
1851
1852 if raw and validated:
1852 if raw and validated:
1853 # if we don't want to process the raw text and the raw
1853 # if we don't want to process the raw text and the raw
1854 # text is already cached, we can exit early.
1854 # text is already cached, we can exit early.
1855 return rawtext
1855 return rawtext
1856 if rev is None:
1856 if rev is None:
1857 rev = self.rev(node)
1857 rev = self.rev(node)
1858 # the revlog's flag for this revision
1858 # the revlog's flag for this revision
1859 # (usually alter its state or content)
1859 # (usually alter its state or content)
1860 flags = self.flags(rev)
1860 flags = self.flags(rev)
1861
1861
1862 if validated and flags == REVIDX_DEFAULT_FLAGS:
1862 if validated and flags == REVIDX_DEFAULT_FLAGS:
1863 # no extra flags set, no flag processor runs, text = rawtext
1863 # no extra flags set, no flag processor runs, text = rawtext
1864 return rawtext
1864 return rawtext
1865
1865
1866 if raw:
1866 if raw:
1867 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1867 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1868 text = rawtext
1868 text = rawtext
1869 else:
1869 else:
1870 r = flagutil.processflagsread(self, rawtext, flags)
1870 r = flagutil.processflagsread(self, rawtext, flags)
1871 text, validatehash = r
1871 text, validatehash = r
1872 if validatehash:
1872 if validatehash:
1873 self.checkhash(text, node, rev=rev)
1873 self.checkhash(text, node, rev=rev)
1874 if not validated:
1874 if not validated:
1875 self._revisioncache = (node, rev, rawtext)
1875 self._revisioncache = (node, rev, rawtext)
1876
1876
1877 return text
1877 return text
1878
1878
1879 def _rawtext(self, node, rev, _df=None):
1879 def _rawtext(self, node, rev, _df=None):
1880 """return the possibly unvalidated rawtext for a revision
1880 """return the possibly unvalidated rawtext for a revision
1881
1881
1882 returns (rev, rawtext, validated)
1882 returns (rev, rawtext, validated)
1883 """
1883 """
1884
1884
1885 # revision in the cache (could be useful to apply delta)
1885 # revision in the cache (could be useful to apply delta)
1886 cachedrev = None
1886 cachedrev = None
1887 # An intermediate text to apply deltas to
1887 # An intermediate text to apply deltas to
1888 basetext = None
1888 basetext = None
1889
1889
1890 # Check if we have the entry in cache
1890 # Check if we have the entry in cache
1891 # The cache entry looks like (node, rev, rawtext)
1891 # The cache entry looks like (node, rev, rawtext)
1892 if self._revisioncache:
1892 if self._revisioncache:
1893 if self._revisioncache[0] == node:
1893 if self._revisioncache[0] == node:
1894 return (rev, self._revisioncache[2], True)
1894 return (rev, self._revisioncache[2], True)
1895 cachedrev = self._revisioncache[1]
1895 cachedrev = self._revisioncache[1]
1896
1896
1897 if rev is None:
1897 if rev is None:
1898 rev = self.rev(node)
1898 rev = self.rev(node)
1899
1899
1900 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1900 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1901 if stopped:
1901 if stopped:
1902 basetext = self._revisioncache[2]
1902 basetext = self._revisioncache[2]
1903
1903
1904 # drop cache to save memory, the caller is expected to
1904 # drop cache to save memory, the caller is expected to
1905 # update self._revisioncache after validating the text
1905 # update self._revisioncache after validating the text
1906 self._revisioncache = None
1906 self._revisioncache = None
1907
1907
1908 targetsize = None
1908 targetsize = None
1909 rawsize = self.index[rev][2]
1909 rawsize = self.index[rev][2]
1910 if 0 <= rawsize:
1910 if 0 <= rawsize:
1911 targetsize = 4 * rawsize
1911 targetsize = 4 * rawsize
1912
1912
1913 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1913 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1914 if basetext is None:
1914 if basetext is None:
1915 basetext = bytes(bins[0])
1915 basetext = bytes(bins[0])
1916 bins = bins[1:]
1916 bins = bins[1:]
1917
1917
1918 rawtext = mdiff.patches(basetext, bins)
1918 rawtext = mdiff.patches(basetext, bins)
1919 del basetext # let us have a chance to free memory early
1919 del basetext # let us have a chance to free memory early
1920 return (rev, rawtext, False)
1920 return (rev, rawtext, False)
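_rawtext resolves a revision by fetching its whole delta chain (reusing the cached revision as base when the chain walk stops there) and folding the deltas onto the base text. A toy stand-in for mdiff.patches, assuming each delta is a list of (start, end, replacement) edits applied from the end of the text backwards so earlier offsets stay valid:

def apply_patch(text, ops):
    """Apply (start, end, replacement) edits; a toy mdiff.patches stand-in."""
    for start, end, repl in sorted(ops, reverse=True):
        text = text[:start] + repl + text[end:]
    return text

base = b'the quick brown fox'
chain = [
    [(4, 9, b'slow')],   # rev 1: 'quick' -> 'slow'
    [(9, 14, b'red')],   # rev 2: 'brown' -> 'red'
]
text = base
for delta in chain:
    text = apply_patch(text, delta)
assert text == b'the slow red fox'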
1921
1921
1922 def _sidedata(self, rev):
1922 def _sidedata(self, rev):
1923 """Return the sidedata for a given revision number."""
1923 """Return the sidedata for a given revision number."""
1924 index_entry = self.index[rev]
1924 index_entry = self.index[rev]
1925 sidedata_offset = index_entry[8]
1925 sidedata_offset = index_entry[8]
1926 sidedata_size = index_entry[9]
1926 sidedata_size = index_entry[9]
1927
1927
1928 if self._inline:
1928 if self._inline:
1929 sidedata_offset += self.index.entry_size * (1 + rev)
1929 sidedata_offset += self.index.entry_size * (1 + rev)
1930 if sidedata_size == 0:
1930 if sidedata_size == 0:
1931 return {}
1931 return {}
1932
1932
1933 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1933 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1934 filename = self._sidedatafile
1934 filename = self._sidedatafile
1935 end = self._docket.sidedata_end
1935 end = self._docket.sidedata_end
1936 offset = sidedata_offset
1936 offset = sidedata_offset
1937 length = sidedata_size
1937 length = sidedata_size
1938 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1938 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1939 raise error.RevlogError(m)
1939 raise error.RevlogError(m)
1940
1940
1941 comp_segment = self._segmentfile_sidedata.read_chunk(
1941 comp_segment = self._segmentfile_sidedata.read_chunk(
1942 sidedata_offset, sidedata_size
1942 sidedata_offset, sidedata_size
1943 )
1943 )
1944
1944
1945 comp = self.index[rev][11]
1945 comp = self.index[rev][11]
1946 if comp == COMP_MODE_PLAIN:
1946 if comp == COMP_MODE_PLAIN:
1947 segment = comp_segment
1947 segment = comp_segment
1948 elif comp == COMP_MODE_DEFAULT:
1948 elif comp == COMP_MODE_DEFAULT:
1949 segment = self._decompressor(comp_segment)
1949 segment = self._decompressor(comp_segment)
1950 elif comp == COMP_MODE_INLINE:
1950 elif comp == COMP_MODE_INLINE:
1951 segment = self.decompress(comp_segment)
1951 segment = self.decompress(comp_segment)
1952 else:
1952 else:
1953 msg = b'unknown compression mode %d'
1953 msg = b'unknown compression mode %d'
1954 msg %= comp
1954 msg %= comp
1955 raise error.RevlogError(msg)
1955 raise error.RevlogError(msg)
1956
1956
1957 sidedata = sidedatautil.deserialize_sidedata(segment)
1957 sidedata = sidedatautil.deserialize_sidedata(segment)
1958 return sidedata
1958 return sidedata
1959
1959
1960 def rawdata(self, nodeorrev, _df=None):
1960 def rawdata(self, nodeorrev, _df=None):
1961 """return an uncompressed raw data of a given node or revision number.
1961 """return an uncompressed raw data of a given node or revision number.
1962
1962
1963 _df - an existing file handle to read from. (internal-only)
1963 _df - an existing file handle to read from. (internal-only)
1964 """
1964 """
1965 return self._revisiondata(nodeorrev, _df, raw=True)
1965 return self._revisiondata(nodeorrev, _df, raw=True)
1966
1966
1967 def hash(self, text, p1, p2):
1967 def hash(self, text, p1, p2):
1968 """Compute a node hash.
1968 """Compute a node hash.
1969
1969
1970 Available as a function so that subclasses can replace the hash
1970 Available as a function so that subclasses can replace the hash
1971 as needed.
1971 as needed.
1972 """
1972 """
1973 return storageutil.hashrevisionsha1(text, p1, p2)
1973 return storageutil.hashrevisionsha1(text, p1, p2)
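The default revlog node hash is the SHA-1 of the two parent nodes (sorted bytewise, smaller first) followed by the revision text, which is what storageutil.hashrevisionsha1 computes. A minimal reimplementation for illustration:

import hashlib

def hashrevision(text, p1, p2):
    """sha1(min(p1, p2) + max(p1, p2) + text) -- the classic revlog hash."""
    a, b = sorted((p1, p2))
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

nullid = b'\0' * 20
node = hashrevision(b'file content\n', nullid, nullid)
assert len(node) == 20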
1974
1974
1975 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1975 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1976 """Check node hash integrity.
1976 """Check node hash integrity.
1977
1977
1978 Available as a function so that subclasses can extend hash mismatch
1978 Available as a function so that subclasses can extend hash mismatch
1979 behaviors as needed.
1979 behaviors as needed.
1980 """
1980 """
1981 try:
1981 try:
1982 if p1 is None and p2 is None:
1982 if p1 is None and p2 is None:
1983 p1, p2 = self.parents(node)
1983 p1, p2 = self.parents(node)
1984 if node != self.hash(text, p1, p2):
1984 if node != self.hash(text, p1, p2):
1985 # Clear the revision cache on hash failure. The revision cache
1985 # Clear the revision cache on hash failure. The revision cache
1986 # only stores the raw revision and clearing the cache does have
1986 # only stores the raw revision and clearing the cache does have
1987 # the side-effect that we won't have a cache hit when the raw
1987 # the side-effect that we won't have a cache hit when the raw
1988 # revision data is accessed. But this case should be rare and
1988 # revision data is accessed. But this case should be rare and
1989 # it is extra work to teach the cache about the hash
1989 # it is extra work to teach the cache about the hash
1990 # verification state.
1990 # verification state.
1991 if self._revisioncache and self._revisioncache[0] == node:
1991 if self._revisioncache and self._revisioncache[0] == node:
1992 self._revisioncache = None
1992 self._revisioncache = None
1993
1993
1994 revornode = rev
1994 revornode = rev
1995 if revornode is None:
1995 if revornode is None:
1996 revornode = templatefilters.short(hex(node))
1996 revornode = templatefilters.short(hex(node))
1997 raise error.RevlogError(
1997 raise error.RevlogError(
1998 _(b"integrity check failed on %s:%s")
1998 _(b"integrity check failed on %s:%s")
1999 % (self.display_id, pycompat.bytestr(revornode))
1999 % (self.display_id, pycompat.bytestr(revornode))
2000 )
2000 )
2001 except error.RevlogError:
2001 except error.RevlogError:
2002 if self._censorable and storageutil.iscensoredtext(text):
2002 if self._censorable and storageutil.iscensoredtext(text):
2003 raise error.CensoredNodeError(self.display_id, node, text)
2003 raise error.CensoredNodeError(self.display_id, node, text)
2004 raise
2004 raise
2005
2005
2006 def _enforceinlinesize(self, tr):
2006 def _enforceinlinesize(self, tr):
2007 """Check if the revlog is too big for inline and convert if so.
2007 """Check if the revlog is too big for inline and convert if so.
2008
2008
2009 This should be called after revisions are added to the revlog. If the
2009 This should be called after revisions are added to the revlog. If the
2010 revlog has grown too large to be an inline revlog, it will convert it
2010 revlog has grown too large to be an inline revlog, it will convert it
2011 to use multiple index and data files.
2011 to use multiple index and data files.
2012 """
2012 """
2013 tiprev = len(self) - 1
2013 tiprev = len(self) - 1
2014 total_size = self.start(tiprev) + self.length(tiprev)
2014 total_size = self.start(tiprev) + self.length(tiprev)
2015 if not self._inline or total_size < _maxinline:
2015 if not self._inline or total_size < _maxinline:
2016 return
2016 return
2017
2017
2018 troffset = tr.findoffset(self._indexfile)
2018 troffset = tr.findoffset(self._indexfile)
2019 if troffset is None:
2019 if troffset is None:
2020 raise error.RevlogError(
2020 raise error.RevlogError(
2021 _(b"%s not found in the transaction") % self._indexfile
2021 _(b"%s not found in the transaction") % self._indexfile
2022 )
2022 )
2023 trindex = None
2023 trindex = None
2024 tr.add(self._datafile, 0)
2024 tr.add(self._datafile, 0)
2025
2025
2026 existing_handles = False
2026 existing_handles = False
2027 if self._writinghandles is not None:
2027 if self._writinghandles is not None:
2028 existing_handles = True
2028 existing_handles = True
2029 fp = self._writinghandles[0]
2029 fp = self._writinghandles[0]
2030 fp.flush()
2030 fp.flush()
2031 fp.close()
2031 fp.close()
2032 # We can't use the cached file handle after close(). So prevent
2032 # We can't use the cached file handle after close(). So prevent
2033 # its usage.
2033 # its usage.
2034 self._writinghandles = None
2034 self._writinghandles = None
2035 self._segmentfile.writing_handle = None
2035 self._segmentfile.writing_handle = None
2036 # No need to deal with the sidedata writing handle as it is only
2036 # No need to deal with the sidedata writing handle as it is only
2037 # relevant for revlog-v2, which is never inline and thus never
2037 # relevant for revlog-v2, which is never inline and thus never
2038 # reaches this code
2038 # reaches this code
2039
2039
2040 new_dfh = self._datafp(b'w+')
2040 new_dfh = self._datafp(b'w+')
2041 new_dfh.truncate(0) # drop any potentially existing data
2041 new_dfh.truncate(0) # drop any potentially existing data
2042 try:
2042 try:
2043 with self._indexfp() as read_ifh:
2043 with self._indexfp() as read_ifh:
2044 for r in self:
2044 for r in self:
2045 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2045 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2046 if (
2046 if (
2047 trindex is None
2047 trindex is None
2048 and troffset
2048 and troffset
2049 <= self.start(r) + r * self.index.entry_size
2049 <= self.start(r) + r * self.index.entry_size
2050 ):
2050 ):
2051 trindex = r
2051 trindex = r
2052 new_dfh.flush()
2052 new_dfh.flush()
2053
2053
2054 if trindex is None:
2054 if trindex is None:
2055 trindex = 0
2055 trindex = 0
2056
2056
2057 with self.__index_new_fp() as fp:
2057 with self.__index_new_fp() as fp:
2058 self._format_flags &= ~FLAG_INLINE_DATA
2058 self._format_flags &= ~FLAG_INLINE_DATA
2059 self._inline = False
2059 self._inline = False
2060 for i in self:
2060 for i in self:
2061 e = self.index.entry_binary(i)
2061 e = self.index.entry_binary(i)
2062 if i == 0 and self._docket is None:
2062 if i == 0 and self._docket is None:
2063 header = self._format_flags | self._format_version
2063 header = self._format_flags | self._format_version
2064 header = self.index.pack_header(header)
2064 header = self.index.pack_header(header)
2065 e = header + e
2065 e = header + e
2066 fp.write(e)
2066 fp.write(e)
2067 if self._docket is not None:
2067 if self._docket is not None:
2068 self._docket.index_end = fp.tell()
2068 self._docket.index_end = fp.tell()
2069
2069
2070 # There is a small transactional race here. If the rename of
2070 # There is a small transactional race here. If the rename of
2071 # the index fails, we should remove the datafile. It is more
2071 # the index fails, we should remove the datafile. It is more
2072 # important to ensure that the data file is not truncated
2072 # important to ensure that the data file is not truncated
2073 # when the index is replaced as otherwise data is lost.
2073 # when the index is replaced as otherwise data is lost.
2074 tr.replace(self._datafile, self.start(trindex))
2074 tr.replace(self._datafile, self.start(trindex))
2075
2075
2076 # the temp file replaces the real index when we exit the context
2076 # the temp file replaces the real index when we exit the context
2077 # manager
2077 # manager
2078
2078
2079 tr.replace(self._indexfile, trindex * self.index.entry_size)
2079 tr.replace(self._indexfile, trindex * self.index.entry_size)
2080 nodemaputil.setup_persistent_nodemap(tr, self)
2080 nodemaputil.setup_persistent_nodemap(tr, self)
2081 self._segmentfile = randomaccessfile.randomaccessfile(
2081 self._segmentfile = randomaccessfile.randomaccessfile(
2082 self.opener,
2082 self.opener,
2083 self._datafile,
2083 self._datafile,
2084 self._chunkcachesize,
2084 self._chunkcachesize,
2085 )
2085 )
2086
2086
2087 if existing_handles:
2087 if existing_handles:
2088 # switched from inline to conventional reopen the index
2088 # switched from inline to conventional reopen the index
2089 ifh = self.__index_write_fp()
2089 ifh = self.__index_write_fp()
2090 self._writinghandles = (ifh, new_dfh, None)
2090 self._writinghandles = (ifh, new_dfh, None)
2091 self._segmentfile.writing_handle = new_dfh
2091 self._segmentfile.writing_handle = new_dfh
2092 new_dfh = None
2092 new_dfh = None
2093 # No need to deal with the sidedata writing handle as it is only
2093 # No need to deal with the sidedata writing handle as it is only
2094 # relevant for revlog-v2, which is never inline and thus never
2094 # relevant for revlog-v2, which is never inline and thus never
2095 # reaches this code
2095 # reaches this code
2096 finally:
2096 finally:
2097 if new_dfh is not None:
2097 if new_dfh is not None:
2098 new_dfh.close()
2098 new_dfh.close()
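The trigger for all of the above is a cheap size check: the end offset of the tip revision (its start plus its length) against a fixed cap. A sketch of that check, assuming the 128 KiB value of _maxinline used by current Mercurial:

_maxinline = 131072  # assumed 128 KiB threshold

def needs_split(start_of_tip, tip_length, inline=True):
    """True when an inline revlog has outgrown the inline format."""
    return inline and start_of_tip + tip_length >= _maxinline

assert not needs_split(1000, 200)
assert needs_split(131000, 500)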
2099
2099
2100 def _nodeduplicatecallback(self, transaction, node):
2100 def _nodeduplicatecallback(self, transaction, node):
2101 """called when trying to add a node already stored."""
2101 """called when trying to add a node already stored."""
2102
2102
2103 @contextlib.contextmanager
2103 @contextlib.contextmanager
2104 def reading(self):
2104 def reading(self):
2105 """Context manager that keeps data and sidedata files open for reading"""
2105 """Context manager that keeps data and sidedata files open for reading"""
2106 with self._segmentfile.reading():
2106 with self._segmentfile.reading():
2107 with self._segmentfile_sidedata.reading():
2107 with self._segmentfile_sidedata.reading():
2108 yield
2108 yield
2109
2109
2110 @contextlib.contextmanager
2110 @contextlib.contextmanager
2111 def _writing(self, transaction):
2111 def _writing(self, transaction):
2112 if self._trypending:
2112 if self._trypending:
2113 msg = b'try to write in a `trypending` revlog: %s'
2113 msg = b'try to write in a `trypending` revlog: %s'
2114 msg %= self.display_id
2114 msg %= self.display_id
2115 raise error.ProgrammingError(msg)
2115 raise error.ProgrammingError(msg)
2116 if self._writinghandles is not None:
2116 if self._writinghandles is not None:
2117 yield
2117 yield
2118 else:
2118 else:
2119 ifh = dfh = sdfh = None
2119 ifh = dfh = sdfh = None
2120 try:
2120 try:
2121 r = len(self)
2121 r = len(self)
2122 # opening the data file.
2122 # opening the data file.
2123 dsize = 0
2123 dsize = 0
2124 if r:
2124 if r:
2125 dsize = self.end(r - 1)
2125 dsize = self.end(r - 1)
2126 dfh = None
2126 dfh = None
2127 if not self._inline:
2127 if not self._inline:
2128 try:
2128 try:
2129 dfh = self._datafp(b"r+")
2129 dfh = self._datafp(b"r+")
2130 if self._docket is None:
2130 if self._docket is None:
2131 dfh.seek(0, os.SEEK_END)
2131 dfh.seek(0, os.SEEK_END)
2132 else:
2132 else:
2133 dfh.seek(self._docket.data_end, os.SEEK_SET)
2133 dfh.seek(self._docket.data_end, os.SEEK_SET)
2134 except FileNotFoundError:
2134 except FileNotFoundError:
2135 dfh = self._datafp(b"w+")
2135 dfh = self._datafp(b"w+")
2136 transaction.add(self._datafile, dsize)
2136 transaction.add(self._datafile, dsize)
2137 if self._sidedatafile is not None:
2137 if self._sidedatafile is not None:
2138 # revlog-v2 does not inline, help Pytype
2138 # revlog-v2 does not inline, help Pytype
2139 assert dfh is not None
2139 assert dfh is not None
2140 try:
2140 try:
2141 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2141 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2142 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2142 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2143 except FileNotFoundError:
2143 except FileNotFoundError:
2144 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2144 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2145 transaction.add(
2145 transaction.add(
2146 self._sidedatafile, self._docket.sidedata_end
2146 self._sidedatafile, self._docket.sidedata_end
2147 )
2147 )
2148
2148
2149 # opening the index file.
2149 # opening the index file.
2150 isize = r * self.index.entry_size
2150 isize = r * self.index.entry_size
2151 ifh = self.__index_write_fp()
2151 ifh = self.__index_write_fp()
2152 if self._inline:
2152 if self._inline:
2153 transaction.add(self._indexfile, dsize + isize)
2153 transaction.add(self._indexfile, dsize + isize)
2154 else:
2154 else:
2155 transaction.add(self._indexfile, isize)
2155 transaction.add(self._indexfile, isize)
2156 # expose all file handles for writing.
2156 # expose all file handles for writing.
2157 self._writinghandles = (ifh, dfh, sdfh)
2157 self._writinghandles = (ifh, dfh, sdfh)
2158 self._segmentfile.writing_handle = ifh if self._inline else dfh
2158 self._segmentfile.writing_handle = ifh if self._inline else dfh
2159 self._segmentfile_sidedata.writing_handle = sdfh
2159 self._segmentfile_sidedata.writing_handle = sdfh
2160 yield
2160 yield
2161 if self._docket is not None:
2161 if self._docket is not None:
2162 self._write_docket(transaction)
2162 self._write_docket(transaction)
2163 finally:
2163 finally:
2164 self._writinghandles = None
2164 self._writinghandles = None
2165 self._segmentfile.writing_handle = None
2165 self._segmentfile.writing_handle = None
2166 self._segmentfile_sidedata.writing_handle = None
2166 self._segmentfile_sidedata.writing_handle = None
2167 if dfh is not None:
2167 if dfh is not None:
2168 dfh.close()
2168 dfh.close()
2169 if sdfh is not None:
2169 if sdfh is not None:
2170 sdfh.close()
2170 sdfh.close()
2171 # closing the index file last to avoid exposing referent to
2171 # closing the index file last to avoid exposing referent to
2172 # potential unflushed data content.
2172 # potential unflushed data content.
2173 if ifh is not None:
2173 if ifh is not None:
2174 ifh.close()
2174 ifh.close()
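The handle juggling in _writing follows a standard shape: open everything up front, expose the handles for the duration of the yield, and close them in a finally block with the index last so it never points at unflushed data. A generic sketch of the same pattern, with hypothetical file names:

import contextlib, os, tempfile

@contextlib.contextmanager
def writing(index_path, data_path):
    ifh = dfh = None
    try:
        dfh = open(data_path, 'ab')   # data file first
        ifh = open(index_path, 'ab')  # then the index
        yield ifh, dfh
    finally:
        if dfh is not None:
            dfh.close()
        if ifh is not None:
            ifh.close()               # index closed last

d = tempfile.mkdtemp()
with writing(os.path.join(d, 'x.i'), os.path.join(d, 'x.d')) as (ifh, dfh):
    dfh.write(b'chunk')
    ifh.write(b'entry')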
2175
2175
2176 def _write_docket(self, transaction):
2176 def _write_docket(self, transaction):
2177 """write the current docket on disk
2177 """write the current docket on disk
2178
2178
2179 Exists as a method to help the changelog implement its transaction logic
2179 Exists as a method to help the changelog implement its transaction logic
2180
2180
2181 We could also imagine using the same transaction logic for all revlogs
2181 We could also imagine using the same transaction logic for all revlogs
2182 since dockets are cheap."""
2182 since dockets are cheap."""
2183 self._docket.write(transaction)
2183 self._docket.write(transaction)
2184
2184
2185 def addrevision(
2185 def addrevision(
2186 self,
2186 self,
2187 text,
2187 text,
2188 transaction,
2188 transaction,
2189 link,
2189 link,
2190 p1,
2190 p1,
2191 p2,
2191 p2,
2192 cachedelta=None,
2192 cachedelta=None,
2193 node=None,
2193 node=None,
2194 flags=REVIDX_DEFAULT_FLAGS,
2194 flags=REVIDX_DEFAULT_FLAGS,
2195 deltacomputer=None,
2195 deltacomputer=None,
2196 sidedata=None,
2196 sidedata=None,
2197 ):
2197 ):
2198 """add a revision to the log
2198 """add a revision to the log
2199
2199
2200 text - the revision data to add
2200 text - the revision data to add
2201 transaction - the transaction object used for rollback
2201 transaction - the transaction object used for rollback
2202 link - the linkrev data to add
2202 link - the linkrev data to add
2203 p1, p2 - the parent nodeids of the revision
2203 p1, p2 - the parent nodeids of the revision
2204 cachedelta - an optional precomputed delta
2204 cachedelta - an optional precomputed delta
2205 node - nodeid of revision; typically node is not specified, and it is
2205 node - nodeid of revision; typically node is not specified, and it is
2206 computed by default as hash(text, p1, p2), however subclasses might
2206 computed by default as hash(text, p1, p2), however subclasses might
2207 use a different hashing method (and override checkhash() in that case)
2207 use a different hashing method (and override checkhash() in that case)
2208 flags - the known flags to set on the revision
2208 flags - the known flags to set on the revision
2209 deltacomputer - an optional deltacomputer instance shared between
2209 deltacomputer - an optional deltacomputer instance shared between
2210 multiple calls
2210 multiple calls
2211 """
2211 """
2212 if link == nullrev:
2212 if link == nullrev:
2213 raise error.RevlogError(
2213 raise error.RevlogError(
2214 _(b"attempted to add linkrev -1 to %s") % self.display_id
2214 _(b"attempted to add linkrev -1 to %s") % self.display_id
2215 )
2215 )
2216
2216
2217 if sidedata is None:
2217 if sidedata is None:
2218 sidedata = {}
2218 sidedata = {}
2219 elif sidedata and not self.hassidedata:
2219 elif sidedata and not self.hassidedata:
2220 raise error.ProgrammingError(
2220 raise error.ProgrammingError(
2221 _(b"trying to add sidedata to a revlog who don't support them")
2221 _(b"trying to add sidedata to a revlog who don't support them")
2222 )
2222 )
2223
2223
2224 if flags:
2224 if flags:
2225 node = node or self.hash(text, p1, p2)
2225 node = node or self.hash(text, p1, p2)
2226
2226
2227 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2227 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2228
2228
2229 # If the flag processor modifies the revision data, ignore any provided
2229 # If the flag processor modifies the revision data, ignore any provided
2230 # cachedelta.
2230 # cachedelta.
2231 if rawtext != text:
2231 if rawtext != text:
2232 cachedelta = None
2232 cachedelta = None
2233
2233
2234 if len(rawtext) > _maxentrysize:
2234 if len(rawtext) > _maxentrysize:
2235 raise error.RevlogError(
2235 raise error.RevlogError(
2236 _(
2236 _(
2237 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2237 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2238 )
2238 )
2239 % (self.display_id, len(rawtext))
2239 % (self.display_id, len(rawtext))
2240 )
2240 )
2241
2241
2242 node = node or self.hash(rawtext, p1, p2)
2242 node = node or self.hash(rawtext, p1, p2)
2243 rev = self.index.get_rev(node)
2243 rev = self.index.get_rev(node)
2244 if rev is not None:
2244 if rev is not None:
2245 return rev
2245 return rev
2246
2246
2247 if validatehash:
2247 if validatehash:
2248 self.checkhash(rawtext, node, p1=p1, p2=p2)
2248 self.checkhash(rawtext, node, p1=p1, p2=p2)
2249
2249
2250 return self.addrawrevision(
2250 return self.addrawrevision(
2251 rawtext,
2251 rawtext,
2252 transaction,
2252 transaction,
2253 link,
2253 link,
2254 p1,
2254 p1,
2255 p2,
2255 p2,
2256 node,
2256 node,
2257 flags,
2257 flags,
2258 cachedelta=cachedelta,
2258 cachedelta=cachedelta,
2259 deltacomputer=deltacomputer,
2259 deltacomputer=deltacomputer,
2260 sidedata=sidedata,
2260 sidedata=sidedata,
2261 )
2261 )
2262
2262
2263 def addrawrevision(
2263 def addrawrevision(
2264 self,
2264 self,
2265 rawtext,
2265 rawtext,
2266 transaction,
2266 transaction,
2267 link,
2267 link,
2268 p1,
2268 p1,
2269 p2,
2269 p2,
2270 node,
2270 node,
2271 flags,
2271 flags,
2272 cachedelta=None,
2272 cachedelta=None,
2273 deltacomputer=None,
2273 deltacomputer=None,
2274 sidedata=None,
2274 sidedata=None,
2275 ):
2275 ):
2276 """add a raw revision with known flags, node and parents
2276 """add a raw revision with known flags, node and parents
2277 useful when reusing a revision not stored in this revlog (ex: received
2277 useful when reusing a revision not stored in this revlog (ex: received
2278 over wire, or read from an external bundle).
2278 over wire, or read from an external bundle).
2279 """
2279 """
2280 with self._writing(transaction):
2280 with self._writing(transaction):
2281 return self._addrevision(
2281 return self._addrevision(
2282 node,
2282 node,
2283 rawtext,
2283 rawtext,
2284 transaction,
2284 transaction,
2285 link,
2285 link,
2286 p1,
2286 p1,
2287 p2,
2287 p2,
2288 flags,
2288 flags,
2289 cachedelta,
2289 cachedelta,
2290 deltacomputer=deltacomputer,
2290 deltacomputer=deltacomputer,
2291 sidedata=sidedata,
2291 sidedata=sidedata,
2292 )
2292 )
2293
2293
2294 def compress(self, data):
2294 def compress(self, data):
2295 """Generate a possibly-compressed representation of data."""
2295 """Generate a possibly-compressed representation of data."""
2296 if not data:
2296 if not data:
2297 return b'', data
2297 return b'', data
2298
2298
2299 compressed = self._compressor.compress(data)
2299 compressed = self._compressor.compress(data)
2300
2300
2301 if compressed:
2301 if compressed:
2302 # The revlog compressor added the header in the returned data.
2302 # The revlog compressor added the header in the returned data.
2303 return b'', compressed
2303 return b'', compressed
2304
2304
2305 if data[0:1] == b'\0':
2305 if data[0:1] == b'\0':
2306 return b'', data
2306 return b'', data
2307 return b'u', data
2307 return b'u', data
2308
2308
2309 def decompress(self, data):
2309 def decompress(self, data):
2310 """Decompress a revlog chunk.
2310 """Decompress a revlog chunk.
2311
2311
2312 The chunk is expected to begin with a header identifying the
2312 The chunk is expected to begin with a header identifying the
2313 format type so it can be routed to an appropriate decompressor.
2313 format type so it can be routed to an appropriate decompressor.
2314 """
2314 """
2315 if not data:
2315 if not data:
2316 return data
2316 return data
2317
2317
2318 # Revlogs are read much more frequently than they are written and many
2318 # Revlogs are read much more frequently than they are written and many
2319 # chunks only take microseconds to decompress, so performance is
2319 # chunks only take microseconds to decompress, so performance is
2320 # important here.
2320 # important here.
2321 #
2321 #
2322 # We can make a few assumptions about revlogs:
2322 # We can make a few assumptions about revlogs:
2323 #
2323 #
2324 # 1) the majority of chunks will be compressed (as opposed to inline
2324 # 1) the majority of chunks will be compressed (as opposed to inline
2325 # raw data).
2325 # raw data).
2326 # 2) decompressing *any* data will likely be at least 10x slower than
2326 # 2) decompressing *any* data will likely be at least 10x slower than
2327 # returning raw inline data.
2327 # returning raw inline data.
2328 # 3) we want to prioritize common and officially supported compression
2328 # 3) we want to prioritize common and officially supported compression
2329 # engines
2329 # engines
2330 #
2330 #
2331 # It follows that we want to optimize for the "decompress compressed data
2331 # It follows that we want to optimize for the "decompress compressed data
2332 # when encoded with common and officially supported compression engines"
2332 # when encoded with common and officially supported compression engines"
2333 # case over "raw data" and "data encoded by less common or non-official
2333 # case over "raw data" and "data encoded by less common or non-official
2334 # compression engines." That is why we have the inline lookup first
2334 # compression engines." That is why we have the inline lookup first
2335 # followed by the compengines lookup.
2335 # followed by the compengines lookup.
2336 #
2336 #
2337 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2337 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2338 # compressed chunks. And this matters for changelog and manifest reads.
2338 # compressed chunks. And this matters for changelog and manifest reads.
2339 t = data[0:1]
2339 t = data[0:1]
2340
2340
2341 if t == b'x':
2341 if t == b'x':
2342 try:
2342 try:
2343 return _zlibdecompress(data)
2343 return _zlibdecompress(data)
2344 except zlib.error as e:
2344 except zlib.error as e:
2345 raise error.RevlogError(
2345 raise error.RevlogError(
2346 _(b'revlog decompress error: %s')
2346 _(b'revlog decompress error: %s')
2347 % stringutil.forcebytestr(e)
2347 % stringutil.forcebytestr(e)
2348 )
2348 )
2349 # '\0' is more common than 'u' so it goes first.
2349 # '\0' is more common than 'u' so it goes first.
2350 elif t == b'\0':
2350 elif t == b'\0':
2351 return data
2351 return data
2352 elif t == b'u':
2352 elif t == b'u':
2353 return util.buffer(data, 1)
2353 return util.buffer(data, 1)
2354
2354
2355 compressor = self._get_decompressor(t)
2355 compressor = self._get_decompressor(t)
2356
2356
2357 return compressor.decompress(data)
2357 return compressor.decompress(data)
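compress() and decompress() agree on a one-byte convention: engines that emit their own header (zlib's 'x', zstd's '(') return an empty prefix, incompressible data is prefixed with 'u', and a leading '\0' passes through untouched. A round-trip sketch using zlib as the engine; the explicit length check is a simplification, since real engines decide internally whether compression paid off:

import zlib

def compress(data):
    if not data:
        return b'', data
    out = zlib.compress(data)
    if len(out) < len(data):
        return b'', out     # engine header ('x' for zlib) is embedded
    if data[:1] == b'\0':
        return b'', data    # already self-describing
    return b'u', data       # stored uncompressed, explicit marker

def decompress(data):
    t = data[:1]
    if t == b'x':
        return zlib.decompress(data)
    if t in (b'', b'\0'):
        return data
    if t == b'u':
        return data[1:]
    raise ValueError('unknown chunk header %r' % t)

blob = b'revlog ' * 100
header, stored = compress(blob)
assert decompress(header + stored) == blob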
2358
2358
2359 def _addrevision(
2359 def _addrevision(
2360 self,
2360 self,
2361 node,
2361 node,
2362 rawtext,
2362 rawtext,
2363 transaction,
2363 transaction,
2364 link,
2364 link,
2365 p1,
2365 p1,
2366 p2,
2366 p2,
2367 flags,
2367 flags,
2368 cachedelta,
2368 cachedelta,
2369 alwayscache=False,
2369 alwayscache=False,
2370 deltacomputer=None,
2370 deltacomputer=None,
2371 sidedata=None,
2371 sidedata=None,
2372 ):
2372 ):
2373 """internal function to add revisions to the log
2373 """internal function to add revisions to the log
2374
2374
2375 see addrevision for argument descriptions.
2375 see addrevision for argument descriptions.
2376
2376
2377 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2377 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2378
2378
2379 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2379 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2380 be used.
2380 be used.
2381
2381
2382 invariants:
2382 invariants:
2383 - rawtext is optional (can be None); if not set, cachedelta must be set.
2383 - rawtext is optional (can be None); if not set, cachedelta must be set.
2384 if both are set, they must correspond to each other.
2384 if both are set, they must correspond to each other.
2385 """
2385 """
2386 if node == self.nullid:
2386 if node == self.nullid:
2387 raise error.RevlogError(
2387 raise error.RevlogError(
2388 _(b"%s: attempt to add null revision") % self.display_id
2388 _(b"%s: attempt to add null revision") % self.display_id
2389 )
2389 )
2390 if (
2390 if (
2391 node == self.nodeconstants.wdirid
2391 node == self.nodeconstants.wdirid
2392 or node in self.nodeconstants.wdirfilenodeids
2392 or node in self.nodeconstants.wdirfilenodeids
2393 ):
2393 ):
2394 raise error.RevlogError(
2394 raise error.RevlogError(
2395 _(b"%s: attempt to add wdir revision") % self.display_id
2395 _(b"%s: attempt to add wdir revision") % self.display_id
2396 )
2396 )
2397 if self._writinghandles is None:
2397 if self._writinghandles is None:
2398 msg = b'adding revision outside `revlog._writing` context'
2398 msg = b'adding revision outside `revlog._writing` context'
2399 raise error.ProgrammingError(msg)
2399 raise error.ProgrammingError(msg)
2400
2400
2401 if self._inline:
2401 if self._inline:
2402 fh = self._writinghandles[0]
2402 fh = self._writinghandles[0]
2403 else:
2403 else:
2404 fh = self._writinghandles[1]
2404 fh = self._writinghandles[1]
2405
2405
2406 btext = [rawtext]
2406 btext = [rawtext]
2407
2407
2408 curr = len(self)
2408 curr = len(self)
2409 prev = curr - 1
2409 prev = curr - 1
2410
2410
2411 offset = self._get_data_offset(prev)
2411 offset = self._get_data_offset(prev)
2412
2412
2413 if self._concurrencychecker:
2413 if self._concurrencychecker:
2414 ifh, dfh, sdfh = self._writinghandles
2414 ifh, dfh, sdfh = self._writinghandles
2415 # XXX no checking for the sidedata file
2415 # XXX no checking for the sidedata file
2416 if self._inline:
2416 if self._inline:
2417 # offset is "as if" it were in the .d file, so we need to add on
2417 # offset is "as if" it were in the .d file, so we need to add on
2418 # the size of the entry metadata.
2418 # the size of the entry metadata.
2419 self._concurrencychecker(
2419 self._concurrencychecker(
2420 ifh, self._indexfile, offset + curr * self.index.entry_size
2420 ifh, self._indexfile, offset + curr * self.index.entry_size
2421 )
2421 )
2422 else:
2422 else:
2423 # Entries in the .i are a consistent size.
2423 # Entries in the .i are a consistent size.
2424 self._concurrencychecker(
2424 self._concurrencychecker(
2425 ifh, self._indexfile, curr * self.index.entry_size
2425 ifh, self._indexfile, curr * self.index.entry_size
2426 )
2426 )
2427 self._concurrencychecker(dfh, self._datafile, offset)
2427 self._concurrencychecker(dfh, self._datafile, offset)
2428
2428
2429 p1r, p2r = self.rev(p1), self.rev(p2)
2429 p1r, p2r = self.rev(p1), self.rev(p2)
2430
2430
2431 # full versions are inserted when the needed deltas
2431 # full versions are inserted when the needed deltas
2432 # become comparable to the uncompressed text
2432 # become comparable to the uncompressed text
2433 if rawtext is None:
2433 if rawtext is None:
2434 # need the rawtext size, before it is changed by flag processors, which is
2434 # need the rawtext size, before it is changed by flag processors, which is
2435 # the non-raw size. use revlog explicitly to avoid filelog's extra
2435 # the non-raw size. use revlog explicitly to avoid filelog's extra
2436 # logic that might remove metadata size.
2436 # logic that might remove metadata size.
2437 textlen = mdiff.patchedsize(
2437 textlen = mdiff.patchedsize(
2438 revlog.size(self, cachedelta[0]), cachedelta[1]
2438 revlog.size(self, cachedelta[0]), cachedelta[1]
2439 )
2439 )
2440 else:
2440 else:
2441 textlen = len(rawtext)
2441 textlen = len(rawtext)
2442
2442
2443 if deltacomputer is None:
2443 if deltacomputer is None:
2444 write_debug = None
2444 write_debug = None
2445 if self._debug_delta:
2445 if self._debug_delta:
2446 write_debug = transaction._report
2446 write_debug = transaction._report
2447 deltacomputer = deltautil.deltacomputer(
2447 deltacomputer = deltautil.deltacomputer(
2448 self, write_debug=write_debug
2448 self, write_debug=write_debug
2449 )
2449 )
2450
2450
2451 revinfo = revlogutils.revisioninfo(
2451 revinfo = revlogutils.revisioninfo(
2452 node,
2452 node,
2453 p1,
2453 p1,
2454 p2,
2454 p2,
2455 btext,
2455 btext,
2456 textlen,
2456 textlen,
2457 cachedelta,
2457 cachedelta,
2458 flags,
2458 flags,
2459 )
2459 )
2460
2460
2461 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2461 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2462
2462
2463 compression_mode = COMP_MODE_INLINE
2463 compression_mode = COMP_MODE_INLINE
2464 if self._docket is not None:
2464 if self._docket is not None:
2465 default_comp = self._docket.default_compression_header
2465 default_comp = self._docket.default_compression_header
2466 r = deltautil.delta_compression(default_comp, deltainfo)
2466 r = deltautil.delta_compression(default_comp, deltainfo)
2467 compression_mode, deltainfo = r
2467 compression_mode, deltainfo = r
2468
2468
2469 sidedata_compression_mode = COMP_MODE_INLINE
2469 sidedata_compression_mode = COMP_MODE_INLINE
2470 if sidedata and self.hassidedata:
2470 if sidedata and self.hassidedata:
2471 sidedata_compression_mode = COMP_MODE_PLAIN
2471 sidedata_compression_mode = COMP_MODE_PLAIN
2472 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2472 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2473 sidedata_offset = self._docket.sidedata_end
2473 sidedata_offset = self._docket.sidedata_end
2474 h, comp_sidedata = self.compress(serialized_sidedata)
2474 h, comp_sidedata = self.compress(serialized_sidedata)
2475 if (
2475 if (
2476 h != b'u'
2476 h != b'u'
2477 and comp_sidedata[0:1] != b'\0'
2477 and comp_sidedata[0:1] != b'\0'
2478 and len(comp_sidedata) < len(serialized_sidedata)
2478 and len(comp_sidedata) < len(serialized_sidedata)
2479 ):
2479 ):
2480 assert not h
2480 assert not h
2481 if (
2481 if (
2482 comp_sidedata[0:1]
2482 comp_sidedata[0:1]
2483 == self._docket.default_compression_header
2483 == self._docket.default_compression_header
2484 ):
2484 ):
2485 sidedata_compression_mode = COMP_MODE_DEFAULT
2485 sidedata_compression_mode = COMP_MODE_DEFAULT
2486 serialized_sidedata = comp_sidedata
2486 serialized_sidedata = comp_sidedata
2487 else:
2487 else:
2488 sidedata_compression_mode = COMP_MODE_INLINE
2488 sidedata_compression_mode = COMP_MODE_INLINE
2489 serialized_sidedata = comp_sidedata
2489 serialized_sidedata = comp_sidedata
2490 else:
2490 else:
2491 serialized_sidedata = b""
2491 serialized_sidedata = b""
2492 # Don't store the offset if the sidedata is empty; that way
2492 # Don't store the offset if the sidedata is empty; that way
2493 # we can easily detect empty sidedata, and it will be no different
2493 # we can easily detect empty sidedata, and it will be no different
2494 # from sidedata we add manually.
2494 # from sidedata we add manually.
2495 sidedata_offset = 0
2495 sidedata_offset = 0
2496
2496
2497 rank = RANK_UNKNOWN
2497 rank = RANK_UNKNOWN
2498 if self._format_version == CHANGELOGV2:
2498 if self._format_version == CHANGELOGV2:
2499 if (p1r, p2r) == (nullrev, nullrev):
2499 if (p1r, p2r) == (nullrev, nullrev):
2500 rank = 1
2500 rank = 1
2501 elif p1r != nullrev and p2r == nullrev:
2501 elif p1r != nullrev and p2r == nullrev:
2502 rank = 1 + self.fast_rank(p1r)
2502 rank = 1 + self.fast_rank(p1r)
2503 elif p1r == nullrev and p2r != nullrev:
2503 elif p1r == nullrev and p2r != nullrev:
2504 rank = 1 + self.fast_rank(p2r)
2504 rank = 1 + self.fast_rank(p2r)
2505 else: # merge node
2505 else: # merge node
2506 if rustdagop is not None and self.index.rust_ext_compat:
2506 if rustdagop is not None and self.index.rust_ext_compat:
2507 rank = rustdagop.rank(self.index, p1r, p2r)
2507 rank = rustdagop.rank(self.index, p1r, p2r)
2508 else:
2508 else:
2509 pmin, pmax = sorted((p1r, p2r))
2509 pmin, pmax = sorted((p1r, p2r))
2510 rank = 1 + self.fast_rank(pmax)
2510 rank = 1 + self.fast_rank(pmax)
2511 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2511 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2512
2512
2513 e = revlogutils.entry(
2513 e = revlogutils.entry(
2514 flags=flags,
2514 flags=flags,
2515 data_offset=offset,
2515 data_offset=offset,
2516 data_compressed_length=deltainfo.deltalen,
2516 data_compressed_length=deltainfo.deltalen,
2517 data_uncompressed_length=textlen,
2517 data_uncompressed_length=textlen,
2518 data_compression_mode=compression_mode,
2518 data_compression_mode=compression_mode,
2519 data_delta_base=deltainfo.base,
2519 data_delta_base=deltainfo.base,
2520 link_rev=link,
2520 link_rev=link,
2521 parent_rev_1=p1r,
2521 parent_rev_1=p1r,
2522 parent_rev_2=p2r,
2522 parent_rev_2=p2r,
2523 node_id=node,
2523 node_id=node,
2524 sidedata_offset=sidedata_offset,
2524 sidedata_offset=sidedata_offset,
2525 sidedata_compressed_length=len(serialized_sidedata),
2525 sidedata_compressed_length=len(serialized_sidedata),
2526 sidedata_compression_mode=sidedata_compression_mode,
2526 sidedata_compression_mode=sidedata_compression_mode,
2527 rank=rank,
2527 rank=rank,
2528 )
2528 )
2529
2529
2530 self.index.append(e)
2530 self.index.append(e)
2531 entry = self.index.entry_binary(curr)
2531 entry = self.index.entry_binary(curr)
2532 if curr == 0 and self._docket is None:
2532 if curr == 0 and self._docket is None:
2533 header = self._format_flags | self._format_version
2533 header = self._format_flags | self._format_version
2534 header = self.index.pack_header(header)
2534 header = self.index.pack_header(header)
2535 entry = header + entry
2535 entry = header + entry
2536 self._writeentry(
2536 self._writeentry(
2537 transaction,
2537 transaction,
2538 entry,
2538 entry,
2539 deltainfo.data,
2539 deltainfo.data,
2540 link,
2540 link,
2541 offset,
2541 offset,
2542 serialized_sidedata,
2542 serialized_sidedata,
2543 sidedata_offset,
2543 sidedata_offset,
2544 )
2544 )
2545
2545
2546 rawtext = btext[0]
2546 rawtext = btext[0]
2547
2547
2548 if alwayscache and rawtext is None:
2548 if alwayscache and rawtext is None:
2549 rawtext = deltacomputer.buildtext(revinfo, fh)
2549 rawtext = deltacomputer.buildtext(revinfo, fh)
2550
2550
2551 if type(rawtext) == bytes: # only accept immutable objects
2551 if type(rawtext) == bytes: # only accept immutable objects
2552 self._revisioncache = (node, curr, rawtext)
2552 self._revisioncache = (node, curr, rawtext)
2553 self._chainbasecache[curr] = deltainfo.chainbase
2553 self._chainbasecache[curr] = deltainfo.chainbase
2554 return curr
2554 return curr
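For CHANGELOGV2 the stored rank is the size of the revision's ancestor set, the revision included. The merge case above computes it incrementally as 1 + rank(pmax) plus the ancestors of pmin not already reachable from pmax, which is what the findmissingrevs() call counts. A toy check with an explicit ancestor-set computation, assuming a simple parents mapping:

NULLREV = -1
parents = {0: (NULLREV, NULLREV), 1: (0, NULLREV), 2: (0, NULLREV), 3: (1, 2)}

def ancestors(rev):
    """The revision plus every ancestor, as a set."""
    seen, stack = set(), [rev]
    while stack:
        r = stack.pop()
        if r == NULLREV or r in seen:
            continue
        seen.add(r)
        stack.extend(parents[r])
    return seen

def rank(rev):
    return len(ancestors(rev))

# merge node 3: 1 + rank(pmax) + |ancestors(pmin) - ancestors(pmax)|
pmin, pmax = sorted(parents[3])
assert rank(3) == 1 + rank(pmax) + len(ancestors(pmin) - ancestors(pmax))
assert rank(3) == 4  # {0, 1, 2, 3}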
2555
2555
2556 def _get_data_offset(self, prev):
2556 def _get_data_offset(self, prev):
2557 """Returns the current offset in the (in-transaction) data file.
2557 """Returns the current offset in the (in-transaction) data file.
2558 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2558 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2559 file to store that information: since sidedata can be rewritten to the
2559 file to store that information: since sidedata can be rewritten to the
2560 end of the data file within a transaction, you can have cases where, for
2560 end of the data file within a transaction, you can have cases where, for
2561 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2561 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2562 to `n - 1`'s sidedata being written after `n`'s data.
2562 to `n - 1`'s sidedata being written after `n`'s data.
2563
2563
2564 TODO cache this in a docket file before getting out of experimental."""
2564 TODO cache this in a docket file before getting out of experimental."""
2565 if self._docket is None:
2565 if self._docket is None:
2566 return self.end(prev)
2566 return self.end(prev)
2567 else:
2567 else:
2568 return self._docket.data_end
2568 return self._docket.data_end
2569
2569
2570 def _writeentry(
2570 def _writeentry(
2571 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2571 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2572 ):
2572 ):
2573 # Files opened in a+ mode have inconsistent behavior on various
2573 # Files opened in a+ mode have inconsistent behavior on various
2574 # platforms. Windows requires that a file positioning call be made
2574 # platforms. Windows requires that a file positioning call be made
2575 # when the file handle transitions between reads and writes. See
2575 # when the file handle transitions between reads and writes. See
2576 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2576 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            if self._sidedatafile:
                transaction.add(self._sidedatafile, sidedata_offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
            ifh.write(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            assert not sidedata
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            # revlog-v2 always has 3 writing handles, help Pytype
            wh1 = self._writinghandles[0]
            wh2 = self._writinghandles[1]
            wh3 = self._writinghandles[2]
            assert wh1 is not None
            assert wh2 is not None
            assert wh3 is not None
            self._docket.index_end = wh1.tell()
            self._docket.data_end = wh2.tell()
            self._docket.sidedata_end = wh3.tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        debug_info=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                write_debug = None
                if self._debug_delta:
                    write_debug = transaction._report
                deltacomputer = deltautil.deltacomputer(
                    self,
                    write_debug=write_debug,
                    debug_info=debug_info,
                )
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty

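    # Illustrative sketch (not part of the original source): the shape of the
    # entries ``addgroup`` consumes, and the new ``debug_info`` hook. Names
    # such as ``some_revlog``, ``cl`` and ``tr`` are hypothetical; per this
    # changeset, ``debug_info`` is simply threaded through to the delta
    # computer to gather statistics about the unbundling operation.
    #
    #     deltas = [
    #         # (node, p1, p2, linknode, deltabase, delta, flags, sidedata)
    #         (node, p1, p2, linknode, deltabase, delta, 0, {}),
    #     ]
    #     debug_info = []  # assumed collector, formatted later by the caller
    #     some_revlog.addgroup(
    #         deltas,
    #         cl.rev,  # linkmapper: linknode -> changelog revision
    #         tr,
    #         debug_info=debug_info,
    #     )
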
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            end = data_end + (rev * self.index.entry_size)

        if self._sidedatafile:
            sidedata_end = self.sidedata_cut_off(rev)
            transaction.add(self._sidedatafile, sidedata_end)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could leverage the docket while stripping. However it is
            # not powerful enough at the time of this comment
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.sidedata_end = sidedata_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()

        del self.index[rev:-1]

    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except FileNotFoundError:
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except FileNotFoundError:
            di = 0

        return (dd, di)

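    # Illustrative sketch (not part of the original source): interpreting the
    # ``checksize`` result. ``rl`` is a hypothetical revlog instance; a healthy
    # revlog has no trailing garbage in either file:
    #
    #     dd, di = rl.checksize()
    #     if (dd, di) != (0, 0):
    #         print(b'%d stray data bytes, %d stray index bytes' % (dd, di))
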
    def files(self):
        res = [self._indexfile]
        if self._docket_file is None:
            if not self._inline:
                res.append(self._datafile)
        else:
            res.append(self._docket_file)
            res.extend(self._docket.old_index_filepaths(include_empty=False))
            if self._docket.data_end:
                res.append(self._datafile)
            res.extend(self._docket.old_data_filepaths(include_empty=False))
            if self._docket.sidedata_end:
                res.append(self._sidedatafile)
            res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions will be re-added as if they were new content. This is
          slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
          e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the destination revlog's current setting is
        kept.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

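    # Illustrative sketch (not part of the original source): cloning a revlog
    # while recomputing every delta, e.g. for a format upgrade. ``src``,
    # ``dst`` and ``tr`` are hypothetical source/destination revlogs and an
    # open transaction:
    #
    #     src.clone(
    #         tr,
    #         dst,
    #         deltareuse=src.DELTAREUSENEVER,  # slowest, re-optimizes deltas
    #     )
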
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        write_debug = None
        if self._debug_delta:
            write_debug = tr._report
        deltacomputer = deltautil.deltacomputer(
            destrevlog,
            write_debug=write_debug,
        )
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text = self._revisiondata(rev)
                sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext = self._revisiondata(rev)
                    sidedata = self.sidedata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )
        elif self._format_version == REVLOGV1:
            rewrite.v1_censor(self, tr, censornode, tombstone)
        else:
            rewrite.v2_censor(self, tr, censornode, tombstone)

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta:   file content starts with b'\1\n', the metadata
            #           header defined in filelog.py, but without a rename
            #   ext:    content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #               | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()     | L1     | L1     | L1    | L1
            # size()        | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext)  | L2     | L2     | L2    | L2
            # len(text)     | L2     | L2     | L2    | L3
            # len(read())   | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

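    # Illustrative sketch (not part of the original source): how a verify
    # pass might consume this generator. ``rl`` is a hypothetical revlog
    # instance and the ``state`` keys follow the ones used above:
    #
    #     state = {b'expectedversion': 1, b'erroroncensored': True}
    #     for problem in rl.verifyintegrity(state):
    #         if problem.error:
    #             print(b'error: ' + problem.error)
    #         elif problem.warning:
    #             print(problem.warning)
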
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

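    # Illustrative sketch (not part of the original source): only requested
    # keys are computed, so a caller needing cheap numbers avoids the on-disk
    # stat() walk that ``storedsize`` performs. ``rl`` is hypothetical:
    #
    #     info = rl.storageinfo(revisionscount=True, trackedsize=True)
    #     print(info[b'revisionscount'], info[b'trackedsize'])
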
    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.hassidedata:
            return
        # revlog formats with sidedata support do not support inline
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh, sdfh = self._writinghandles
            dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

            current_offset = sdfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.hassidedata:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self.compress(serialized_sidedata)
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around
                sdfh.seek(current_offset, os.SEEK_SET)
                sdfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
            self._docket.sidedata_end = sdfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
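
    # Illustrative sketch (not part of the original source): the compression
    # mode selection above, in isolation. COMP_MODE_PLAIN stores the payload
    # uncompressed, COMP_MODE_DEFAULT relies on the docket-wide default
    # compressor, and COMP_MODE_INLINE keeps a self-describing chunk. The
    # helper below is a simplified restatement, not the actual API:
    #
    #     def _pick_mode(raw, header, compressed, default_header):
    #         if header != b'u' and compressed[0:1] != b'\0':
    #             if len(compressed) < len(raw):
    #                 if compressed[0:1] == default_header:
    #                     return COMP_MODE_DEFAULT, compressed
    #                 return COMP_MODE_INLINE, compressed
    #         return COMP_MODE_PLAIN, raw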
@@ -1,1351 +1,1386 b''
# revlogdeltas.py - Logic around delta computation for revlog
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2018 Octobus <contact@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Helper class to compute deltas stored inside revlogs"""


import collections
import struct

# import stuff from node for others to import from revlog
from ..node import nullrev
from ..i18n import _
from ..pycompat import getattr

from .constants import (
    COMP_MODE_DEFAULT,
    COMP_MODE_INLINE,
    COMP_MODE_PLAIN,
    KIND_CHANGELOG,
    KIND_FILELOG,
    KIND_MANIFESTLOG,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)

from ..thirdparty import attr

from .. import (
    error,
    mdiff,
    util,
)

from . import flagutil

# maximum <delta-chain-data>/<revision-text-length> ratio
LIMIT_DELTA2TEXT = 2

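# Illustrative note (not part of the original source): per the ratio comment
# above, with LIMIT_DELTA2TEXT = 2 a revision whose full text is 10 kB may be
# stored behind at most 20 kB of cumulated delta-chain data; a candidate delta
# pushing the chain beyond that ratio is considered worse than storing a
# fresh snapshot.
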

class _testrevlog:
    """minimalist fake revlog to use in doctests"""

    def __init__(self, data, density=0.5, mingap=0, snapshot=()):
        """data is a list of revision payload boundaries"""
        self._data = data
        self._srdensitythreshold = density
        self._srmingapsize = mingap
        self._snapshot = set(snapshot)
        self.index = None

    def start(self, rev):
        if rev == nullrev:
            return 0
        if rev == 0:
            return 0
        return self._data[rev - 1]

    def end(self, rev):
        if rev == nullrev:
            return 0
        return self._data[rev]

    def length(self, rev):
        return self.end(rev) - self.start(rev)

    def __len__(self):
        return len(self._data)

    def issnapshot(self, rev):
        if rev == nullrev:
            return True
        return rev in self._snapshot


def slicechunk(revlog, revs, targetsize=None):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
    `revlog._srmingapsize` is skipped.

    If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
    For consistency with other slicing choices, this limit won't go lower than
    `revlog._srmingapsize`.

    If individual revision chunks are larger than this limit, they will still
    be yielded individually.

    >>> data = [
    ...     5,  #00 (5)
    ...     10, #01 (5)
    ...     12, #02 (2)
    ...     12, #03 (empty)
    ...     27, #04 (15)
    ...     31, #05 (4)
    ...     31, #06 (empty)
    ...     42, #07 (11)
    ...     47, #08 (5)
    ...     47, #09 (empty)
    ...     48, #10 (1)
    ...     51, #11 (3)
    ...     74, #12 (23)
    ...     85, #13 (11)
    ...     86, #14 (1)
    ...     91, #15 (5)
    ... ]
    >>> revlog = _testrevlog(data, snapshot=range(16))

    >>> list(slicechunk(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(slicechunk(revlog, [0, 15]))
    [[0], [15]]
    >>> list(slicechunk(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]

    Slicing with a maximum chunk size
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
    [[0], [11], [13], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
    [[0], [11], [13, 15]]

    Slicing involving nullrev
    >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
    [[-1, 0], [11], [13, 15]]
    >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
    [[-1], [13], [15]]
    """
    if targetsize is not None:
        targetsize = max(targetsize, revlog._srmingapsize)
    # targetsize should not be specified when evaluating delta candidates:
    # * targetsize is used to ensure we stay within specification when reading,
    densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
    if densityslicing is None:
        densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
    for chunk in densityslicing(
        revs, revlog._srdensitythreshold, revlog._srmingapsize
    ):
        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
            yield subchunk


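# Illustrative note (not part of the original source): "density" above is
# payload bytes divided by the byte span actually read. With the doctest data
# above, reading only revisions 0 and 15 would span 91 bytes for 10 bytes of
# payload (density ~0.11, below the default 0.5 threshold), which is why
# ``slicechunk(revlog, [0, 15])`` yields them as separate reads.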
153 def _slicechunktosize(revlog, revs, targetsize=None):
153 def _slicechunktosize(revlog, revs, targetsize=None):
154 """slice revs to match the target size
154 """slice revs to match the target size
155
155
156 This is intended to be used on chunk that density slicing selected by that
156 This is intended to be used on chunk that density slicing selected by that
157 are still too large compared to the read garantee of revlog. This might
157 are still too large compared to the read garantee of revlog. This might
158 happens when "minimal gap size" interrupted the slicing or when chain are
158 happens when "minimal gap size" interrupted the slicing or when chain are
159 built in a way that create large blocks next to each other.
159 built in a way that create large blocks next to each other.
160
160
161 >>> data = [
161 >>> data = [
162 ... 3, #0 (3)
162 ... 3, #0 (3)
163 ... 5, #1 (2)
163 ... 5, #1 (2)
164 ... 6, #2 (1)
164 ... 6, #2 (1)
165 ... 8, #3 (2)
165 ... 8, #3 (2)
166 ... 8, #4 (empty)
166 ... 8, #4 (empty)
167 ... 11, #5 (3)
167 ... 11, #5 (3)
168 ... 12, #6 (1)
168 ... 12, #6 (1)
169 ... 13, #7 (1)
169 ... 13, #7 (1)
170 ... 14, #8 (1)
170 ... 14, #8 (1)
171 ... ]
171 ... ]
172
172
173 == All snapshots cases ==
173 == All snapshots cases ==
174 >>> revlog = _testrevlog(data, snapshot=range(9))
174 >>> revlog = _testrevlog(data, snapshot=range(9))
175
175
176 Cases where chunk is already small enough
176 Cases where chunk is already small enough
177 >>> list(_slicechunktosize(revlog, [0], 3))
177 >>> list(_slicechunktosize(revlog, [0], 3))
178 [[0]]
178 [[0]]
179 >>> list(_slicechunktosize(revlog, [6, 7], 3))
179 >>> list(_slicechunktosize(revlog, [6, 7], 3))
180 [[6, 7]]
180 [[6, 7]]
181 >>> list(_slicechunktosize(revlog, [0], None))
181 >>> list(_slicechunktosize(revlog, [0], None))
182 [[0]]
182 [[0]]
183 >>> list(_slicechunktosize(revlog, [6, 7], None))
183 >>> list(_slicechunktosize(revlog, [6, 7], None))
184 [[6, 7]]
184 [[6, 7]]
185
185
186 cases where we need actual slicing
186 cases where we need actual slicing
187 >>> list(_slicechunktosize(revlog, [0, 1], 3))
187 >>> list(_slicechunktosize(revlog, [0, 1], 3))
188 [[0], [1]]
188 [[0], [1]]
189 >>> list(_slicechunktosize(revlog, [1, 3], 3))
189 >>> list(_slicechunktosize(revlog, [1, 3], 3))
190 [[1], [3]]
190 [[1], [3]]
191 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
191 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
192 [[1, 2], [3]]
192 [[1, 2], [3]]
193 >>> list(_slicechunktosize(revlog, [3, 5], 3))
193 >>> list(_slicechunktosize(revlog, [3, 5], 3))
194 [[3], [5]]
194 [[3], [5]]
195 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
195 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
196 [[3], [5]]
196 [[3], [5]]
197 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
197 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
198 [[5], [6, 7, 8]]
198 [[5], [6, 7, 8]]
199 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
199 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
200 [[0], [1, 2], [3], [5], [6, 7, 8]]
200 [[0], [1, 2], [3], [5], [6, 7, 8]]
201
201
202 Case with too large individual chunk (must return valid chunk)
202 Case with too large individual chunk (must return valid chunk)
203 >>> list(_slicechunktosize(revlog, [0, 1], 2))
203 >>> list(_slicechunktosize(revlog, [0, 1], 2))
204 [[0], [1]]
204 [[0], [1]]
205 >>> list(_slicechunktosize(revlog, [1, 3], 1))
205 >>> list(_slicechunktosize(revlog, [1, 3], 1))
206 [[1], [3]]
206 [[1], [3]]
207 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
207 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
208 [[3], [5]]
208 [[3], [5]]
209
209
210 == No Snapshot cases ==
210 == No Snapshot cases ==
211 >>> revlog = _testrevlog(data)
211 >>> revlog = _testrevlog(data)
212
212
213 Cases where chunk is already small enough
213 Cases where chunk is already small enough
214 >>> list(_slicechunktosize(revlog, [0], 3))
214 >>> list(_slicechunktosize(revlog, [0], 3))
215 [[0]]
215 [[0]]
216 >>> list(_slicechunktosize(revlog, [6, 7], 3))
216 >>> list(_slicechunktosize(revlog, [6, 7], 3))
217 [[6, 7]]
217 [[6, 7]]
218 >>> list(_slicechunktosize(revlog, [0], None))
218 >>> list(_slicechunktosize(revlog, [0], None))
219 [[0]]
219 [[0]]
220 >>> list(_slicechunktosize(revlog, [6, 7], None))
220 >>> list(_slicechunktosize(revlog, [6, 7], None))
221 [[6, 7]]
221 [[6, 7]]
222
222
223 Cases where we need actual slicing
223 Cases where we need actual slicing
224 >>> list(_slicechunktosize(revlog, [0, 1], 3))
224 >>> list(_slicechunktosize(revlog, [0, 1], 3))
225 [[0], [1]]
225 [[0], [1]]
226 >>> list(_slicechunktosize(revlog, [1, 3], 3))
226 >>> list(_slicechunktosize(revlog, [1, 3], 3))
227 [[1], [3]]
227 [[1], [3]]
228 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
228 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
229 [[1], [2, 3]]
229 [[1], [2, 3]]
230 >>> list(_slicechunktosize(revlog, [3, 5], 3))
230 >>> list(_slicechunktosize(revlog, [3, 5], 3))
231 [[3], [5]]
231 [[3], [5]]
232 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
232 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
233 [[3], [4, 5]]
233 [[3], [4, 5]]
234 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
234 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
235 [[5], [6, 7, 8]]
235 [[5], [6, 7, 8]]
236 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
236 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
237 [[0], [1, 2], [3], [5], [6, 7, 8]]
237 [[0], [1, 2], [3], [5], [6, 7, 8]]
238
238
239 Case with a too-large individual chunk (must still return a valid chunk)
239 Case with a too-large individual chunk (must still return a valid chunk)
240 >>> list(_slicechunktosize(revlog, [0, 1], 2))
240 >>> list(_slicechunktosize(revlog, [0, 1], 2))
241 [[0], [1]]
241 [[0], [1]]
242 >>> list(_slicechunktosize(revlog, [1, 3], 1))
242 >>> list(_slicechunktosize(revlog, [1, 3], 1))
243 [[1], [3]]
243 [[1], [3]]
244 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
244 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
245 [[3], [5]]
245 [[3], [5]]
246
246
247 == Mixed case ==
247 == Mixed case ==
248 >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
248 >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
249 >>> list(_slicechunktosize(revlog, list(range(9)), 5))
249 >>> list(_slicechunktosize(revlog, list(range(9)), 5))
250 [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
250 [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
251 """
251 """
252 assert targetsize is None or 0 <= targetsize
252 assert targetsize is None or 0 <= targetsize
253 startdata = revlog.start(revs[0])
253 startdata = revlog.start(revs[0])
254 enddata = revlog.end(revs[-1])
254 enddata = revlog.end(revs[-1])
255 fullspan = enddata - startdata
255 fullspan = enddata - startdata
256 if targetsize is None or fullspan <= targetsize:
256 if targetsize is None or fullspan <= targetsize:
257 yield revs
257 yield revs
258 return
258 return
259
259
260 startrevidx = 0
260 startrevidx = 0
261 endrevidx = 1
261 endrevidx = 1
262 iterrevs = enumerate(revs)
262 iterrevs = enumerate(revs)
263 next(iterrevs) # skip first rev.
263 next(iterrevs) # skip first rev.
264 # first step: get snapshots out of the way
264 # first step: get snapshots out of the way
265 for idx, r in iterrevs:
265 for idx, r in iterrevs:
266 span = revlog.end(r) - startdata
266 span = revlog.end(r) - startdata
267 snapshot = revlog.issnapshot(r)
267 snapshot = revlog.issnapshot(r)
268 if span <= targetsize and snapshot:
268 if span <= targetsize and snapshot:
269 endrevidx = idx + 1
269 endrevidx = idx + 1
270 else:
270 else:
271 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
271 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
272 if chunk:
272 if chunk:
273 yield chunk
273 yield chunk
274 startrevidx = idx
274 startrevidx = idx
275 startdata = revlog.start(r)
275 startdata = revlog.start(r)
276 endrevidx = idx + 1
276 endrevidx = idx + 1
277 if not snapshot:
277 if not snapshot:
278 break
278 break
279
279
280 # for the others, we use binary slicing to quickly converge toward valid
280 # for the others, we use binary slicing to quickly converge toward valid
281 # chunks (otherwise, we might end up looking for start/end of many
281 # chunks (otherwise, we might end up looking for start/end of many
282 # revisions). This logic is not looking for the perfect slicing point, it
282 # revisions). This logic is not looking for the perfect slicing point, it
283 # focuses on quickly converging toward valid chunks.
283 # focuses on quickly converging toward valid chunks.
284 nbitem = len(revs)
284 nbitem = len(revs)
285 while (enddata - startdata) > targetsize:
285 while (enddata - startdata) > targetsize:
286 endrevidx = nbitem
286 endrevidx = nbitem
287 if nbitem - startrevidx <= 1:
287 if nbitem - startrevidx <= 1:
288 break # protect against individual chunk larger than limit
288 break # protect against individual chunk larger than limit
289 localenddata = revlog.end(revs[endrevidx - 1])
289 localenddata = revlog.end(revs[endrevidx - 1])
290 span = localenddata - startdata
290 span = localenddata - startdata
291 while span > targetsize:
291 while span > targetsize:
292 if endrevidx - startrevidx <= 1:
292 if endrevidx - startrevidx <= 1:
293 break # protect against individual chunk larger than limit
293 break # protect against individual chunk larger than limit
294 endrevidx -= (endrevidx - startrevidx) // 2
294 endrevidx -= (endrevidx - startrevidx) // 2
295 localenddata = revlog.end(revs[endrevidx - 1])
295 localenddata = revlog.end(revs[endrevidx - 1])
296 span = localenddata - startdata
296 span = localenddata - startdata
297 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
297 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
298 if chunk:
298 if chunk:
299 yield chunk
299 yield chunk
300 startrevidx = endrevidx
300 startrevidx = endrevidx
301 startdata = revlog.start(revs[startrevidx])
301 startdata = revlog.start(revs[startrevidx])
302
302
303 chunk = _trimchunk(revlog, revs, startrevidx)
303 chunk = _trimchunk(revlog, revs, startrevidx)
304 if chunk:
304 if chunk:
305 yield chunk
305 yield chunk
306
306
307
307
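# --- editor's note: illustrative sketch (hedged; not part of the original
# module). The halving loop in `_slicechunktosize` above shrinks `endrevidx`
# until the byte span fits under `targetsize`, instead of probing every
# revision. The same convergence, on a bare list of cumulative end offsets
# (`ends`, assumed non-decreasing), looks like this:

def _halve_until_fit(ends, startdata, startidx, endidx, targetsize):
    """Shrink ``endidx`` by halving until the span fits (sketch only)."""
    while ends[endidx - 1] - startdata > targetsize:
        if endidx - startidx <= 1:
            break  # a single item may legitimately exceed the limit
        endidx -= (endidx - startidx) // 2
    return endidx

# With ends = [3, 5, 6, 8, 8, 11], startdata = 0 and targetsize = 6, the
# first probe spans 11 bytes, one halving lands on endidx = 3 (span 6),
# and the loop stops -- mirroring the inner `while span > targetsize` above.
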
308 def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
308 def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
309 """slice revs to reduce the amount of unrelated data to be read from disk.
309 """slice revs to reduce the amount of unrelated data to be read from disk.
310
310
311 ``revs`` is sliced into groups that should each be read in one go.
311 ``revs`` is sliced into groups that should each be read in one go.
312 Assumes that revs are sorted.
312 Assumes that revs are sorted.
313
313
314 The initial chunk is sliced until the overall density (payload/chunks-span
314 The initial chunk is sliced until the overall density (payload/chunks-span
315 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
315 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
316 skipped.
316 skipped.
317
317
318 >>> revlog = _testrevlog([
318 >>> revlog = _testrevlog([
319 ... 5, #00 (5)
319 ... 5, #00 (5)
320 ... 10, #01 (5)
320 ... 10, #01 (5)
321 ... 12, #02 (2)
321 ... 12, #02 (2)
322 ... 12, #03 (empty)
322 ... 12, #03 (empty)
323 ... 27, #04 (15)
323 ... 27, #04 (15)
324 ... 31, #05 (4)
324 ... 31, #05 (4)
325 ... 31, #06 (empty)
325 ... 31, #06 (empty)
326 ... 42, #07 (11)
326 ... 42, #07 (11)
327 ... 47, #08 (5)
327 ... 47, #08 (5)
328 ... 47, #09 (empty)
328 ... 47, #09 (empty)
329 ... 48, #10 (1)
329 ... 48, #10 (1)
330 ... 51, #11 (3)
330 ... 51, #11 (3)
331 ... 74, #12 (23)
331 ... 74, #12 (23)
332 ... 85, #13 (11)
332 ... 85, #13 (11)
333 ... 86, #14 (1)
333 ... 86, #14 (1)
334 ... 91, #15 (5)
334 ... 91, #15 (5)
335 ... ])
335 ... ])
336
336
337 >>> list(_slicechunktodensity(revlog, list(range(16))))
337 >>> list(_slicechunktodensity(revlog, list(range(16))))
338 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
338 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
339 >>> list(_slicechunktodensity(revlog, [0, 15]))
339 >>> list(_slicechunktodensity(revlog, [0, 15]))
340 [[0], [15]]
340 [[0], [15]]
341 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
341 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
342 [[0], [11], [15]]
342 [[0], [11], [15]]
343 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
343 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
344 [[0], [11, 13, 15]]
344 [[0], [11, 13, 15]]
345 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
345 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
346 [[1, 2], [5, 8, 10, 11], [14]]
346 [[1, 2], [5, 8, 10, 11], [14]]
347 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
347 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
348 ... mingapsize=20))
348 ... mingapsize=20))
349 [[1, 2, 3, 5, 8, 10, 11], [14]]
349 [[1, 2, 3, 5, 8, 10, 11], [14]]
350 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
350 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
351 ... targetdensity=0.95))
351 ... targetdensity=0.95))
352 [[1, 2], [5], [8, 10, 11], [14]]
352 [[1, 2], [5], [8, 10, 11], [14]]
353 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
353 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
354 ... targetdensity=0.95, mingapsize=12))
354 ... targetdensity=0.95, mingapsize=12))
355 [[1, 2], [5, 8, 10, 11], [14]]
355 [[1, 2], [5, 8, 10, 11], [14]]
356 """
356 """
357 start = revlog.start
357 start = revlog.start
358 length = revlog.length
358 length = revlog.length
359
359
360 if len(revs) <= 1:
360 if len(revs) <= 1:
361 yield revs
361 yield revs
362 return
362 return
363
363
364 deltachainspan = segmentspan(revlog, revs)
364 deltachainspan = segmentspan(revlog, revs)
365
365
366 if deltachainspan < mingapsize:
366 if deltachainspan < mingapsize:
367 yield revs
367 yield revs
368 return
368 return
369
369
370 readdata = deltachainspan
370 readdata = deltachainspan
371 chainpayload = sum(length(r) for r in revs)
371 chainpayload = sum(length(r) for r in revs)
372
372
373 if deltachainspan:
373 if deltachainspan:
374 density = chainpayload / float(deltachainspan)
374 density = chainpayload / float(deltachainspan)
375 else:
375 else:
376 density = 1.0
376 density = 1.0
377
377
378 if density >= targetdensity:
378 if density >= targetdensity:
379 yield revs
379 yield revs
380 return
380 return
381
381
382 # Collect the gaps; they are sorted below so the largest can be popped first
382 # Collect the gaps; they are sorted below so the largest can be popped first
383 gaps = []
383 gaps = []
384 prevend = None
384 prevend = None
385 for i, rev in enumerate(revs):
385 for i, rev in enumerate(revs):
386 revstart = start(rev)
386 revstart = start(rev)
387 revlen = length(rev)
387 revlen = length(rev)
388
388
389 # Skip empty revisions to form larger holes
389 # Skip empty revisions to form larger holes
390 if revlen == 0:
390 if revlen == 0:
391 continue
391 continue
392
392
393 if prevend is not None:
393 if prevend is not None:
394 gapsize = revstart - prevend
394 gapsize = revstart - prevend
395 # only consider holes that are large enough
395 # only consider holes that are large enough
396 if gapsize > mingapsize:
396 if gapsize > mingapsize:
397 gaps.append((gapsize, i))
397 gaps.append((gapsize, i))
398
398
399 prevend = revstart + revlen
399 prevend = revstart + revlen
400 # sort the gaps so we can pop them from largest to smallest
400 # sort the gaps so we can pop them from largest to smallest
401 gaps.sort()
401 gaps.sort()
402
402
403 # Collect the indices of the largest holes until the density is acceptable
403 # Collect the indices of the largest holes until the density is acceptable
404 selected = []
404 selected = []
405 while gaps and density < targetdensity:
405 while gaps and density < targetdensity:
406 gapsize, gapidx = gaps.pop()
406 gapsize, gapidx = gaps.pop()
407
407
408 selected.append(gapidx)
408 selected.append(gapidx)
409
409
410 # the gaps were sorted in ascending order, so the pop() above
410 # the gaps were sorted in ascending order, so the pop() above
411 # returned the largest remaining gap
411 # returned the largest remaining gap
412 readdata -= gapsize
412 readdata -= gapsize
413 if readdata > 0:
413 if readdata > 0:
414 density = chainpayload / float(readdata)
414 density = chainpayload / float(readdata)
415 else:
415 else:
416 density = 1.0
416 density = 1.0
417 selected.sort()
417 selected.sort()
418
418
419 # Cut the revs at collected indices
419 # Cut the revs at collected indices
420 previdx = 0
420 previdx = 0
421 for idx in selected:
421 for idx in selected:
422
422
423 chunk = _trimchunk(revlog, revs, previdx, idx)
423 chunk = _trimchunk(revlog, revs, previdx, idx)
424 if chunk:
424 if chunk:
425 yield chunk
425 yield chunk
426
426
427 previdx = idx
427 previdx = idx
428
428
429 chunk = _trimchunk(revlog, revs, previdx)
429 chunk = _trimchunk(revlog, revs, previdx)
430 if chunk:
430 if chunk:
431 yield chunk
431 yield chunk
432
432
433
433
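# --- editor's note: worked example (hedged; not part of the original
# module). Density in `_slicechunktodensity` is payload / bytes-read. Two
# 5-byte revisions separated by a 15-byte gap give an initial density of
# 10 / 25 = 0.4; slicing at that gap restores a density of 1.0 for the
# data actually read:

chainpayload = 5 + 5          # bytes we actually want
deltachainspan = 5 + 15 + 5   # bytes a single contiguous read would cover
assert chainpayload / float(deltachainspan) == 0.4
assert chainpayload / float(deltachainspan - 15) == 1.0
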
434 def _trimchunk(revlog, revs, startidx, endidx=None):
434 def _trimchunk(revlog, revs, startidx, endidx=None):
435 """returns revs[startidx:endidx] without empty trailing revs
435 """returns revs[startidx:endidx] without empty trailing revs
436
436
437 Doctest Setup
437 Doctest Setup
438 >>> revlog = _testrevlog([
438 >>> revlog = _testrevlog([
439 ... 5, #0
439 ... 5, #0
440 ... 10, #1
440 ... 10, #1
441 ... 12, #2
441 ... 12, #2
442 ... 12, #3 (empty)
442 ... 12, #3 (empty)
443 ... 17, #4
443 ... 17, #4
444 ... 21, #5
444 ... 21, #5
445 ... 21, #6 (empty)
445 ... 21, #6 (empty)
446 ... ])
446 ... ])
447
447
448 Contiguous cases:
448 Contiguous cases:
449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
450 [0, 1, 2, 3, 4, 5]
450 [0, 1, 2, 3, 4, 5]
451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
452 [0, 1, 2, 3, 4]
452 [0, 1, 2, 3, 4]
453 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
453 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
454 [0, 1, 2]
454 [0, 1, 2]
455 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
455 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
456 [2]
456 [2]
457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
458 [3, 4, 5]
458 [3, 4, 5]
459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
460 [3, 4]
460 [3, 4]
461
461
462 Discontiguous cases:
462 Discontiguous cases:
463 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
463 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
464 [1, 3, 5]
464 [1, 3, 5]
465 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
465 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
466 [1]
466 [1]
467 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
467 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
468 [3, 5]
468 [3, 5]
469 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
469 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
470 [3, 5]
470 [3, 5]
471 """
471 """
472 length = revlog.length
472 length = revlog.length
473
473
474 if endidx is None:
474 if endidx is None:
475 endidx = len(revs)
475 endidx = len(revs)
476
476
477 # If we have a non-empty delta candidate, there is nothing to trim
477 # If we have a non-empty delta candidate, there is nothing to trim
478 if revs[endidx - 1] < len(revlog):
478 if revs[endidx - 1] < len(revlog):
479 # Trim empty revs at the end, except the very first revision of a chain
479 # Trim empty revs at the end, except the very first revision of a chain
480 while (
480 while (
481 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
481 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
482 ):
482 ):
483 endidx -= 1
483 endidx -= 1
484
484
485 return revs[startidx:endidx]
485 return revs[startidx:endidx]
486
486
487
487
488 def segmentspan(revlog, revs):
488 def segmentspan(revlog, revs):
489 """Get the byte span of a segment of revisions
489 """Get the byte span of a segment of revisions
490
490
491 revs is a sorted array of revision numbers
491 revs is a sorted array of revision numbers
492
492
493 >>> revlog = _testrevlog([
493 >>> revlog = _testrevlog([
494 ... 5, #0
494 ... 5, #0
495 ... 10, #1
495 ... 10, #1
496 ... 12, #2
496 ... 12, #2
497 ... 12, #3 (empty)
497 ... 12, #3 (empty)
498 ... 17, #4
498 ... 17, #4
499 ... ])
499 ... ])
500
500
501 >>> segmentspan(revlog, [0, 1, 2, 3, 4])
501 >>> segmentspan(revlog, [0, 1, 2, 3, 4])
502 17
502 17
503 >>> segmentspan(revlog, [0, 4])
503 >>> segmentspan(revlog, [0, 4])
504 17
504 17
505 >>> segmentspan(revlog, [3, 4])
505 >>> segmentspan(revlog, [3, 4])
506 5
506 5
507 >>> segmentspan(revlog, [1, 2, 3,])
507 >>> segmentspan(revlog, [1, 2, 3,])
508 7
508 7
509 >>> segmentspan(revlog, [1, 3])
509 >>> segmentspan(revlog, [1, 3])
510 7
510 7
511 """
511 """
512 if not revs:
512 if not revs:
513 return 0
513 return 0
514 end = revlog.end(revs[-1])
514 end = revlog.end(revs[-1])
515 return end - revlog.start(revs[0])
515 return end - revlog.start(revs[0])
516
516
517
517
518 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
518 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
519 """build full text from a (base, delta) pair and other metadata"""
519 """build full text from a (base, delta) pair and other metadata"""
520 # special case deltas which replace entire base; no need to decode
520 # special case deltas which replace entire base; no need to decode
521 # base revision. this neatly avoids censored bases, which throw when
521 # base revision. this neatly avoids censored bases, which throw when
522 # they're decoded.
522 # they're decoded.
523 hlen = struct.calcsize(b">lll")
523 hlen = struct.calcsize(b">lll")
524 if delta[:hlen] == mdiff.replacediffheader(
524 if delta[:hlen] == mdiff.replacediffheader(
525 revlog.rawsize(baserev), len(delta) - hlen
525 revlog.rawsize(baserev), len(delta) - hlen
526 ):
526 ):
527 fulltext = delta[hlen:]
527 fulltext = delta[hlen:]
528 else:
528 else:
529 # deltabase is the rawtext before being changed by flag processors,
529 # deltabase is the rawtext before being changed by flag processors,
530 # which is equivalent to non-raw text
530 # which is equivalent to non-raw text
531 basetext = revlog.revision(baserev, _df=fh)
531 basetext = revlog.revision(baserev, _df=fh)
532 fulltext = mdiff.patch(basetext, delta)
532 fulltext = mdiff.patch(basetext, delta)
533
533
534 try:
534 try:
535 validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
535 validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
536 if validatehash:
536 if validatehash:
537 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
537 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
538 if flags & REVIDX_ISCENSORED:
538 if flags & REVIDX_ISCENSORED:
539 raise error.StorageError(
539 raise error.StorageError(
540 _(b'node %s is not censored') % expectednode
540 _(b'node %s is not censored') % expectednode
541 )
541 )
542 except error.CensoredNodeError:
542 except error.CensoredNodeError:
543 # must pass the censored index flag to add censored revisions
543 # must pass the censored index flag to add censored revisions
544 if not flags & REVIDX_ISCENSORED:
544 if not flags & REVIDX_ISCENSORED:
545 raise
545 raise
546 return fulltext
546 return fulltext
547
547
548
548
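# --- editor's note: illustrative sketch (hedged; not part of the original
# module). The special case in `_textfromdelta` relies on a full-replacement
# delta starting with a (start=0, end=baselen, newlen) header packed as
# ">lll" (my reading of mdiff.replacediffheader); when that header matches,
# the full text is simply everything after the 12 header bytes:

import struct

baselen, newtext = 7, b'entirely new full text'
header = struct.pack(b'>lll', 0, baselen, len(newtext))
delta = header + newtext
hlen = struct.calcsize(b'>lll')
assert hlen == 12
assert delta[hlen:] == newtext  # no base revision needs decoding
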
549 @attr.s(slots=True, frozen=True)
549 @attr.s(slots=True, frozen=True)
550 class _deltainfo:
550 class _deltainfo:
551 distance = attr.ib()
551 distance = attr.ib()
552 deltalen = attr.ib()
552 deltalen = attr.ib()
553 data = attr.ib()
553 data = attr.ib()
554 base = attr.ib()
554 base = attr.ib()
555 chainbase = attr.ib()
555 chainbase = attr.ib()
556 chainlen = attr.ib()
556 chainlen = attr.ib()
557 compresseddeltalen = attr.ib()
557 compresseddeltalen = attr.ib()
558 snapshotdepth = attr.ib()
558 snapshotdepth = attr.ib()
559
559
560
560
561 def drop_u_compression(delta):
561 def drop_u_compression(delta):
562 """turn into a "u" (no-compression) into no-compression without header
562 """turn into a "u" (no-compression) into no-compression without header
563
563
564 This is useful for revlog format that has better compression method.
564 This is useful for revlog format that has better compression method.
565 """
565 """
566 assert delta.data[0] == b'u', delta.data[0]
566 assert delta.data[0] == b'u', delta.data[0]
567 return _deltainfo(
567 return _deltainfo(
568 delta.distance,
568 delta.distance,
569 delta.deltalen - 1,
569 delta.deltalen - 1,
570 (b'', delta.data[1]),
570 (b'', delta.data[1]),
571 delta.base,
571 delta.base,
572 delta.chainbase,
572 delta.chainbase,
573 delta.chainlen,
573 delta.chainlen,
574 delta.compresseddeltalen,
574 delta.compresseddeltalen,
575 delta.snapshotdepth,
575 delta.snapshotdepth,
576 )
576 )
577
577
578
578
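# --- editor's note: illustrative sketch (hedged; not part of the original
# module). As the assert in `drop_u_compression` shows, a (header, data)
# pair of (b'u', payload) marks a chunk stored uncompressed; the helper
# rewrites it as (b'', payload), shaving the one-byte header off the
# stored length when the containing format compresses on its own:

stored = (b'u', b'raw delta payload')   # hypothetical compress() output
dropped = (b'', stored[1])              # what drop_u_compression keeps
assert len(stored[0]) + len(stored[1]) == len(dropped[0]) + len(dropped[1]) + 1
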
579 def isgooddeltainfo(revlog, deltainfo, revinfo):
579 def isgooddeltainfo(revlog, deltainfo, revinfo):
580 """Returns True if the given delta is good. Good means that it is within
580 """Returns True if the given delta is good. Good means that it is within
581 the disk span, disk size, and chain length bounds that we know to be
581 the disk span, disk size, and chain length bounds that we know to be
582 performant."""
582 performant."""
583 if deltainfo is None:
583 if deltainfo is None:
584 return False
584 return False
585
585
586 # - 'deltainfo.distance' is the distance from the base revision --
586 # - 'deltainfo.distance' is the distance from the base revision --
587 # bounding it limits the amount of I/O we need to do.
587 # bounding it limits the amount of I/O we need to do.
588 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
588 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
589 # deltas we need to apply -- bounding it limits the amount of CPU
589 # deltas we need to apply -- bounding it limits the amount of CPU
590 # we consume.
590 # we consume.
591
591
592 textlen = revinfo.textlen
592 textlen = revinfo.textlen
593 defaultmax = textlen * 4
593 defaultmax = textlen * 4
594 maxdist = revlog._maxdeltachainspan
594 maxdist = revlog._maxdeltachainspan
595 if not maxdist:
595 if not maxdist:
596 maxdist = deltainfo.distance # ensure the conditional below passes
596 maxdist = deltainfo.distance # ensure the conditional below passes
597 maxdist = max(maxdist, defaultmax)
597 maxdist = max(maxdist, defaultmax)
598
598
599 # Bad delta from read span:
599 # Bad delta from read span:
600 #
600 #
601 # If the span of data read is larger than the maximum allowed.
601 # If the span of data read is larger than the maximum allowed.
602 #
602 #
603 # In the sparse-revlog case, we rely on the associated "sparse reading"
603 # In the sparse-revlog case, we rely on the associated "sparse reading"
604 # to avoid issues related to the span of data. In theory, it would be
604 # to avoid issues related to the span of data. In theory, it would be
605 # possible to build a pathological revlog where the delta pattern would
605 # possible to build a pathological revlog where the delta pattern would
606 # lead to too many reads. However, this does not happen in practice. So
606 # lead to too many reads. However, this does not happen in practice. So
607 # we skip the span check entirely.
607 # we skip the span check entirely.
608 if not revlog._sparserevlog and maxdist < deltainfo.distance:
608 if not revlog._sparserevlog and maxdist < deltainfo.distance:
609 return False
609 return False
610
610
611 # Bad delta from new delta size:
611 # Bad delta from new delta size:
612 #
612 #
613 # If the delta size is larger than the target text, storing the
613 # If the delta size is larger than the target text, storing the
614 # delta will be inefficient.
614 # delta will be inefficient.
615 if textlen < deltainfo.deltalen:
615 if textlen < deltainfo.deltalen:
616 return False
616 return False
617
617
618 # Bad delta from cumulated payload size:
618 # Bad delta from cumulated payload size:
619 #
619 #
620 # If the sum of the deltas gets larger than K * the target text length.
620 # If the sum of the deltas gets larger than K * the target text length.
621 if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
621 if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
622 return False
622 return False
623
623
624 # Bad delta from chain length:
624 # Bad delta from chain length:
625 #
625 #
626 # If the number of deltas in the chain gets too high.
626 # If the number of deltas in the chain gets too high.
627 if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen:
627 if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen:
628 return False
628 return False
629
629
630 # bad delta from intermediate snapshot size limit
630 # bad delta from intermediate snapshot size limit
631 #
631 #
632 # If an intermediate snapshot's size is higher than the limit. The
632 # If an intermediate snapshot's size is higher than the limit. The
633 # limit exists to prevent endless chains of intermediate deltas from
633 # limit exists to prevent endless chains of intermediate deltas from
634 # being created.
634 # being created.
635 if (
635 if (
636 deltainfo.snapshotdepth is not None
636 deltainfo.snapshotdepth is not None
637 and (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen
637 and (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen
638 ):
638 ):
639 return False
639 return False
640
640
641 # bad delta if new intermediate snapshot is larger than the previous
641 # bad delta if new intermediate snapshot is larger than the previous
642 # snapshot
642 # snapshot
643 if (
643 if (
644 deltainfo.snapshotdepth
644 deltainfo.snapshotdepth
645 and revlog.length(deltainfo.base) < deltainfo.deltalen
645 and revlog.length(deltainfo.base) < deltainfo.deltalen
646 ):
646 ):
647 return False
647 return False
648
648
649 return True
649 return True
650
650
651
651
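# --- editor's note: worked example (hedged; not part of the original
# module). The intermediate-snapshot check in `isgooddeltainfo` above
# rejects a delta when `textlen >> snapshotdepth` is smaller than the
# delta length. For a 4000-byte text at snapshot depth 2:

textlen, snapshotdepth = 4000, 2
limit = textlen >> snapshotdepth
assert limit == 1000
assert limit < 1200        # a 1200-byte delta would be rejected
assert not (limit < 900)   # a 900-byte delta passes this check
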
652 # If a revision's full text is that much bigger than a base candidate full
652 # If a revision's full text is that much bigger than a base candidate full
653 # text's, it is very unlikely that it will produce a valid delta. We no longer
653 # text's, it is very unlikely that it will produce a valid delta. We no longer
654 # consider these candidates.
654 # consider these candidates.
655 LIMIT_BASE2TEXT = 500
655 LIMIT_BASE2TEXT = 500
656
656
657
657
658 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
658 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
659 """Provides group of revision to be tested as delta base
659 """Provides group of revision to be tested as delta base
660
660
661 This top-level function focuses on emitting groups with unique and worthwhile
661 This top-level function focuses on emitting groups with unique and worthwhile
662 content. See `_rawgroups` for details about the group order.
662 content. See `_rawgroups` for details about the group order.
663 """
663 """
664 # should we try to build a delta?
664 # should we try to build a delta?
665 if not (len(revlog) and revlog._storedeltachains):
665 if not (len(revlog) and revlog._storedeltachains):
666 yield None
666 yield None
667 return
667 return
668
668
669 deltalength = revlog.length
669 deltalength = revlog.length
670 deltaparent = revlog.deltaparent
670 deltaparent = revlog.deltaparent
671 sparse = revlog._sparserevlog
671 sparse = revlog._sparserevlog
672 good = None
672 good = None
673
673
674 deltas_limit = textlen * LIMIT_DELTA2TEXT
674 deltas_limit = textlen * LIMIT_DELTA2TEXT
675
675
676 tested = {nullrev}
676 tested = {nullrev}
677 candidates = _refinedgroups(revlog, p1, p2, cachedelta)
677 candidates = _refinedgroups(
678 revlog,
679 p1,
680 p2,
681 cachedelta,
682 )
678 while True:
683 while True:
679 temptative = candidates.send(good)
684 temptative = candidates.send(good)
680 if temptative is None:
685 if temptative is None:
681 break
686 break
682 group = []
687 group = []
683 for rev in temptative:
688 for rev in temptative:
684 # skip over empty deltas (no need to include them in a chain)
689 # skip over empty deltas (no need to include them in a chain)
685 while revlog._generaldelta and not (
690 while revlog._generaldelta and not (
686 rev == nullrev or rev in tested or deltalength(rev)
691 rev == nullrev or rev in tested or deltalength(rev)
687 ):
692 ):
688 tested.add(rev)
693 tested.add(rev)
689 rev = deltaparent(rev)
694 rev = deltaparent(rev)
690 # no need to try a delta against nullrev, this will be done as a
695 # no need to try a delta against nullrev, this will be done as a
691 # last resort.
696 # last resort.
692 if rev == nullrev:
697 if rev == nullrev:
693 continue
698 continue
694 # filter out revisions we tested already
699 # filter out revisions we tested already
695 if rev in tested:
700 if rev in tested:
696 continue
701 continue
697 tested.add(rev)
702 tested.add(rev)
698 # filter out delta bases that will never produce a good delta
703 # filter out delta bases that will never produce a good delta
699 if deltas_limit < revlog.length(rev):
704 if deltas_limit < revlog.length(rev):
700 continue
705 continue
701 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
706 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
702 continue
707 continue
703 # no delta for rawtext-changing revs (see "candelta" for why)
708 # no delta for rawtext-changing revs (see "candelta" for why)
704 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
709 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
705 continue
710 continue
711
706 # If we reach here, we are about to build and test a delta.
712 # If we reach here, we are about to build and test a delta.
707 # The delta building process will compute the chaininfo in all
713 # The delta building process will compute the chaininfo in all
708 # cases; since that computation is cached, it is fine to access it
714 # cases; since that computation is cached, it is fine to access it
709 # here too.
715 # here too.
710 chainlen, chainsize = revlog._chaininfo(rev)
716 chainlen, chainsize = revlog._chaininfo(rev)
711 # if chain will be too long, skip base
717 # if chain will be too long, skip base
712 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
718 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
713 continue
719 continue
714 # if the chain already has too much data, skip this base
720 # if the chain already has too much data, skip this base
715 if deltas_limit < chainsize:
721 if deltas_limit < chainsize:
716 continue
722 continue
717 if sparse and revlog.upperboundcomp is not None:
723 if sparse and revlog.upperboundcomp is not None:
718 maxcomp = revlog.upperboundcomp
724 maxcomp = revlog.upperboundcomp
719 basenotsnap = (p1, p2, nullrev)
725 basenotsnap = (p1, p2, nullrev)
720 if rev not in basenotsnap and revlog.issnapshot(rev):
726 if rev not in basenotsnap and revlog.issnapshot(rev):
721 snapshotdepth = revlog.snapshotdepth(rev)
727 snapshotdepth = revlog.snapshotdepth(rev)
722 # If text is significantly larger than the base, we can
728 # If text is significantly larger than the base, we can
723 # expect the resulting delta to be proportional to the size
729 # expect the resulting delta to be proportional to the size
724 # difference
730 # difference
725 revsize = revlog.rawsize(rev)
731 revsize = revlog.rawsize(rev)
726 rawsizedistance = max(textlen - revsize, 0)
732 rawsizedistance = max(textlen - revsize, 0)
727 # use an estimate of the compression upper bound.
733 # use an estimate of the compression upper bound.
728 lowestrealisticdeltalen = rawsizedistance // maxcomp
734 lowestrealisticdeltalen = rawsizedistance // maxcomp
729
735
730 # check the absolute constraint on the delta size
736 # check the absolute constraint on the delta size
731 snapshotlimit = textlen >> snapshotdepth
737 snapshotlimit = textlen >> snapshotdepth
732 if snapshotlimit < lowestrealisticdeltalen:
738 if snapshotlimit < lowestrealisticdeltalen:
733 # delta lower bound is larger than accepted upper bound
739 # delta lower bound is larger than accepted upper bound
734 continue
740 continue
735
741
736 # check the relative constraint on the delta size
742 # check the relative constraint on the delta size
737 revlength = revlog.length(rev)
743 revlength = revlog.length(rev)
738 if revlength < lowestrealisticdeltalen:
744 if revlength < lowestrealisticdeltalen:
739 # delta probable lower bound is larger than target base
745 # delta probable lower bound is larger than target base
740 continue
746 continue
741
747
742 group.append(rev)
748 group.append(rev)
743 if group:
749 if group:
744 # XXX: in the sparse revlog case, group can become large,
750 # XXX: in the sparse revlog case, group can become large,
745 # impacting performance. Some bounding or slicing mechanism
751 # impacting performance. Some bounding or slicing mechanism
746 # would help to reduce this impact.
752 # would help to reduce this impact.
747 good = yield tuple(group)
753 good = yield tuple(group)
748 yield None
754 yield None
749
755
750
756
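# --- editor's note: protocol sketch (hedged; not part of the original
# module). `_candidategroups` above is driven through generator.send():
# the caller primes it with next(), then sends back the revision it
# accepted (or None) to receive the next candidate group. A minimal
# consumer loop, with `pick` standing in for the real delta test, could
# look like:

def _consume_groups(groups, pick):
    """Walk a send()-driven candidate generator (illustration only)."""
    found = None
    candidates = next(groups)  # prime the generator
    while candidates is not None:
        for rev in candidates:
            if pick(rev):
                found = rev
        candidates = groups.send(found)  # feed the verdict back in
    return found
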
751 def _findsnapshots(revlog, cache, start_rev):
757 def _findsnapshots(revlog, cache, start_rev):
752 """find snapshot from start_rev to tip"""
758 """find snapshot from start_rev to tip"""
753 if util.safehasattr(revlog.index, b'findsnapshots'):
759 if util.safehasattr(revlog.index, b'findsnapshots'):
754 revlog.index.findsnapshots(cache, start_rev)
760 revlog.index.findsnapshots(cache, start_rev)
755 else:
761 else:
756 deltaparent = revlog.deltaparent
762 deltaparent = revlog.deltaparent
757 issnapshot = revlog.issnapshot
763 issnapshot = revlog.issnapshot
758 for rev in revlog.revs(start_rev):
764 for rev in revlog.revs(start_rev):
759 if issnapshot(rev):
765 if issnapshot(rev):
760 cache[deltaparent(rev)].append(rev)
766 cache[deltaparent(rev)].append(rev)
761
767
762
768
763 def _refinedgroups(revlog, p1, p2, cachedelta):
769 def _refinedgroups(revlog, p1, p2, cachedelta):
764 good = None
770 good = None
765 # First we try to reuse the delta contained in the bundle.
771 # First we try to reuse the delta contained in the bundle.
766 # (or from the source revlog)
772 # (or from the source revlog)
767 #
773 #
768 # This logic only applies to general delta repositories and can be disabled
774 # This logic only applies to general delta repositories and can be disabled
769 # through configuration. Disabling source-delta reuse is useful when
775 # through configuration. Disabling source-delta reuse is useful when
770 # we want to make sure we recompute "optimal" deltas.
776 # we want to make sure we recompute "optimal" deltas.
777 debug_info = None
771 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
778 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
772 # Assume what we received from the server is a good choice
779 # Assume what we received from the server is a good choice
773 # build delta will reuse the cache
780 # build delta will reuse the cache
781 if debug_info is not None:
782 debug_info['cached-delta.tested'] += 1
774 good = yield (cachedelta[0],)
783 good = yield (cachedelta[0],)
775 if good is not None:
784 if good is not None:
785 if debug_info is not None:
786 debug_info['cached-delta.accepted'] += 1
776 yield None
787 yield None
777 return
788 return
789 # XXX cache me higher
778 snapshots = collections.defaultdict(list)
790 snapshots = collections.defaultdict(list)
779 for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
791 groups = _rawgroups(
792 revlog,
793 p1,
794 p2,
795 cachedelta,
796 snapshots,
797 )
798 for candidates in groups:
780 good = yield candidates
799 good = yield candidates
781 if good is not None:
800 if good is not None:
782 break
801 break
783
802
784 # If sparse revlog is enabled, we can try to refine the available deltas
803 # If sparse revlog is enabled, we can try to refine the available deltas
785 if not revlog._sparserevlog:
804 if not revlog._sparserevlog:
786 yield None
805 yield None
787 return
806 return
788
807
789 # if we have a refinable value, try to refine it
808 # if we have a refinable value, try to refine it
790 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
809 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
791 # refine snapshot down
810 # refine snapshot down
792 previous = None
811 previous = None
793 while previous != good:
812 while previous != good:
794 previous = good
813 previous = good
795 base = revlog.deltaparent(good)
814 base = revlog.deltaparent(good)
796 if base == nullrev:
815 if base == nullrev:
797 break
816 break
798 good = yield (base,)
817 good = yield (base,)
799 # refine snapshot up
818 # refine snapshot up
800 if not snapshots:
819 if not snapshots:
801 _findsnapshots(revlog, snapshots, good + 1)
820 _findsnapshots(revlog, snapshots, good + 1)
802 previous = None
821 previous = None
803 while good != previous:
822 while good != previous:
804 previous = good
823 previous = good
805 children = tuple(sorted(c for c in snapshots[good]))
824 children = tuple(sorted(c for c in snapshots[good]))
806 good = yield children
825 good = yield children
807
826
808 # we have found nothing
827 if debug_info is not None:
828 if good is None:
829 debug_info['no-solution'] += 1
830
809 yield None
831 yield None
810
832
811
833
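# --- editor's note: illustrative sketch (hedged; not part of the original
# module). Refinement in `_refinedgroups` walks the accepted snapshot's
# delta-parent chain toward the root ("down"), then its snapshot children
# away from it ("up"). The downward walk, over a plain delta-parent
# mapping, reduces to:

def _walk_down(deltaparent, rev, nullrev=-1):
    """Yield successive delta parents of ``rev`` until the root (sketch)."""
    base = deltaparent(rev)
    while base != nullrev:
        yield base
        base = deltaparent(base)

# e.g. with deltaparent = {5: 3, 3: 1, 1: -1}.get,
# list(_walk_down(deltaparent, 5)) == [3, 1]; the real loop additionally
# asks the caller (via yield/send) whether each step is an improvement.
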
812 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
834 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
813 """Provides group of revision to be tested as delta base
835 """Provides group of revision to be tested as delta base
814
836
815 This lower-level function focuses on emitting theoretically interesting
837 This lower-level function focuses on emitting theoretically interesting
816 deltas, without looking at any practical details.
838 deltas, without looking at any practical details.
817
839
818 The group order aims at providing fast or small candidates first.
840 The group order aims at providing fast or small candidates first.
819 """
841 """
820 gdelta = revlog._generaldelta
842 gdelta = revlog._generaldelta
821 # gate sparse behind general-delta because of issue6056
843 # gate sparse behind general-delta because of issue6056
822 sparse = gdelta and revlog._sparserevlog
844 sparse = gdelta and revlog._sparserevlog
823 curr = len(revlog)
845 curr = len(revlog)
824 prev = curr - 1
846 prev = curr - 1
825 deltachain = lambda rev: revlog._deltachain(rev)[0]
847 deltachain = lambda rev: revlog._deltachain(rev)[0]
826
848
827 if gdelta:
849 if gdelta:
828 # exclude already lazy tested base if any
850 # exclude already lazy tested base if any
829 parents = [p for p in (p1, p2) if p != nullrev]
851 parents = [p for p in (p1, p2) if p != nullrev]
830
852
831 if not revlog._deltabothparents and len(parents) == 2:
853 if not revlog._deltabothparents and len(parents) == 2:
832 parents.sort()
854 parents.sort()
833 # To minimize the chance of having to build a fulltext,
855 # To minimize the chance of having to build a fulltext,
834 # pick first whichever parent is closest to us (max rev)
856 # pick first whichever parent is closest to us (max rev)
835 yield (parents[1],)
857 yield (parents[1],)
836 # then the other one (min rev) if the first did not fit
858 # then the other one (min rev) if the first did not fit
837 yield (parents[0],)
859 yield (parents[0],)
838 elif len(parents) > 0:
860 elif len(parents) > 0:
839 # Test all parents (1 or 2), and keep the best candidate
861 # Test all parents (1 or 2), and keep the best candidate
840 yield parents
862 yield parents
841
863
842 if sparse and parents:
864 if sparse and parents:
843 if snapshots is None:
865 if snapshots is None:
844 # map: base-rev: snapshot-rev
866 # map: base-rev: snapshot-rev
845 snapshots = collections.defaultdict(list)
867 snapshots = collections.defaultdict(list)
846 # See if an existing snapshot in the parent chains can be used as
868 # See if an existing snapshot in the parent chains can be used as
847 # a base for a new intermediate snapshot
869 # a base for a new intermediate snapshot
848 #
870 #
849 # search for snapshot in parents delta chain
871 # search for snapshot in parents delta chain
850 # map: snapshot-level: snapshot-rev
872 # map: snapshot-level: snapshot-rev
851 parents_snaps = collections.defaultdict(set)
873 parents_snaps = collections.defaultdict(set)
852 candidate_chains = [deltachain(p) for p in parents]
874 candidate_chains = [deltachain(p) for p in parents]
853 for chain in candidate_chains:
875 for chain in candidate_chains:
854 for idx, s in enumerate(chain):
876 for idx, s in enumerate(chain):
855 if not revlog.issnapshot(s):
877 if not revlog.issnapshot(s):
856 break
878 break
857 parents_snaps[idx].add(s)
879 parents_snaps[idx].add(s)
858 snapfloor = min(parents_snaps[0]) + 1
880 snapfloor = min(parents_snaps[0]) + 1
859 _findsnapshots(revlog, snapshots, snapfloor)
881 _findsnapshots(revlog, snapshots, snapfloor)
860 # search for the highest "unrelated" revision
882 # search for the highest "unrelated" revision
861 #
883 #
862 # Adding snapshots used by "unrelated" revisions increases the odds that we
884 # Adding snapshots used by "unrelated" revisions increases the odds that we
863 # reuse an independent, yet better, snapshot chain.
885 # reuse an independent, yet better, snapshot chain.
864 #
886 #
865 # XXX instead of building a set of revisions, we could lazily enumerate
887 # XXX instead of building a set of revisions, we could lazily enumerate
866 # over the chains. That would be more efficient, however we stick to
888 # over the chains. That would be more efficient, however we stick to
867 # simple code for now.
889 # simple code for now.
868 all_revs = set()
890 all_revs = set()
869 for chain in candidate_chains:
891 for chain in candidate_chains:
870 all_revs.update(chain)
892 all_revs.update(chain)
871 other = None
893 other = None
872 for r in revlog.revs(prev, snapfloor):
894 for r in revlog.revs(prev, snapfloor):
873 if r not in all_revs:
895 if r not in all_revs:
874 other = r
896 other = r
875 break
897 break
876 if other is not None:
898 if other is not None:
877 # To avoid unfair competition, we won't use unrelated intermediate
899 # To avoid unfair competition, we won't use unrelated intermediate
878 # snapshot that are deeper than the ones from the parent delta
900 # snapshot that are deeper than the ones from the parent delta
879 # chain.
901 # chain.
880 max_depth = max(parents_snaps.keys())
902 max_depth = max(parents_snaps.keys())
881 chain = deltachain(other)
903 chain = deltachain(other)
882 for idx, s in enumerate(chain):
904 for idx, s in enumerate(chain):
883 if s < snapfloor:
905 if s < snapfloor:
884 continue
906 continue
885 if max_depth < idx:
907 if max_depth < idx:
886 break
908 break
887 if not revlog.issnapshot(s):
909 if not revlog.issnapshot(s):
888 break
910 break
889 parents_snaps[idx].add(s)
911 parents_snaps[idx].add(s)
890 # Test them as possible intermediate snapshot base
912 # Test them as possible intermediate snapshot base
891 # We test them from highest to lowest level. High-level ones are more
913 # We test them from highest to lowest level. High-level ones are more
892 # likely to result in a small delta
914 # likely to result in a small delta
893 floor = None
915 floor = None
894 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
916 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
895 siblings = set()
917 siblings = set()
896 for s in snaps:
918 for s in snaps:
897 siblings.update(snapshots[s])
919 siblings.update(snapshots[s])
898 # Before considering making a new intermediate snapshot, we check
920 # Before considering making a new intermediate snapshot, we check
899 # if an existing snapshot, a child of the base we consider, would be
921 # if an existing snapshot, a child of the base we consider, would be
900 # suitable.
922 # suitable.
901 #
923 #
902 # It gives a chance to reuse a delta chain "unrelated" to the
924 # It gives a chance to reuse a delta chain "unrelated" to the
903 # current revision instead of starting our own. Without such
925 # current revision instead of starting our own. Without such
904 # re-use, topological branches would keep reopening new chains,
926 # re-use, topological branches would keep reopening new chains,
905 # creating more and more snapshots as the repository grows.
927 # creating more and more snapshots as the repository grows.
906
928
907 if floor is not None:
929 if floor is not None:
908 # We only do this for siblings created after the one in our
930 # We only do this for siblings created after the one in our
909 # parent's delta chain. Those created before have less chance
931 # parent's delta chain. Those created before have less chance
910 # of being a valid base, since our ancestors had to create a new
932 # of being a valid base, since our ancestors had to create a new
911 # snapshot.
933 # snapshot.
912 siblings = [r for r in siblings if floor < r]
934 siblings = [r for r in siblings if floor < r]
913 yield tuple(sorted(siblings))
935 yield tuple(sorted(siblings))
914 # then test the base from our parent's delta chain.
936 # then test the base from our parent's delta chain.
915 yield tuple(sorted(snaps))
937 yield tuple(sorted(snaps))
916 floor = min(snaps)
938 floor = min(snaps)
917 # No suitable base was found in the parent chain; check whether any full
939 # No suitable base was found in the parent chain; check whether any full
918 # snapshot emitted since the parent's base would be a suitable base for an
940 # snapshot emitted since the parent's base would be a suitable base for an
919 # intermediate snapshot.
941 # intermediate snapshot.
920 #
942 #
921 # It gives a chance to reuse a delta chain unrelated to the current
943 # It gives a chance to reuse a delta chain unrelated to the current
922 # revision instead of starting our own. Without such re-use,
944 # revision instead of starting our own. Without such re-use,
923 # topological branches would keep reopening new full chains, creating
945 # topological branches would keep reopening new full chains, creating
924 # more and more snapshots as the repository grows.
946 # more and more snapshots as the repository grows.
925 yield tuple(snapshots[nullrev])
947 yield tuple(snapshots[nullrev])
926
948
927 if not sparse:
949 if not sparse:
928 # other approaches failed; try against prev to hopefully save us a
950 # other approaches failed; try against prev to hopefully save us a
929 # fulltext.
951 # fulltext.
930 yield (prev,)
952 yield (prev,)
931
953
932
954
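# --- editor's note: worked example (hedged; not part of the original
# module). With general-delta on, both-parent testing off, and parents
# p1=8, p2=5, `_rawgroups` above yields the closest parent alone first,
# then the other one:

p1, p2, nullrev = 8, 5, -1
parents = sorted(p for p in (p1, p2) if p != nullrev)
first_group = (parents[1],)   # (8,): max rev, closest to us
second_group = (parents[0],)  # (5,): tried only if the first did not fit
assert first_group == (8,) and second_group == (5,)
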
933 class deltacomputer:
955 class deltacomputer:
934 def __init__(self, revlog, write_debug=None, debug_search=False):
956 def __init__(
957 self,
958 revlog,
959 write_debug=None,
960 debug_search=False,
961 debug_info=None,
962 ):
935 self.revlog = revlog
963 self.revlog = revlog
936 self._write_debug = write_debug
964 self._write_debug = write_debug
937 self._debug_search = debug_search
965 self._debug_search = debug_search
966 self._debug_info = debug_info
938
967
939 def buildtext(self, revinfo, fh):
968 def buildtext(self, revinfo, fh):
940 """Builds a fulltext version of a revision
969 """Builds a fulltext version of a revision
941
970
942 revinfo: revisioninfo instance that contains all needed info
971 revinfo: revisioninfo instance that contains all needed info
943 fh: file handle to either the .i or the .d revlog file,
972 fh: file handle to either the .i or the .d revlog file,
944 depending on whether it is inlined or not
973 depending on whether it is inlined or not
945 """
974 """
946 btext = revinfo.btext
975 btext = revinfo.btext
947 if btext[0] is not None:
976 if btext[0] is not None:
948 return btext[0]
977 return btext[0]
949
978
950 revlog = self.revlog
979 revlog = self.revlog
951 cachedelta = revinfo.cachedelta
980 cachedelta = revinfo.cachedelta
952 baserev = cachedelta[0]
981 baserev = cachedelta[0]
953 delta = cachedelta[1]
982 delta = cachedelta[1]
954
983
955 fulltext = btext[0] = _textfromdelta(
984 fulltext = btext[0] = _textfromdelta(
956 fh,
985 fh,
957 revlog,
986 revlog,
958 baserev,
987 baserev,
959 delta,
988 delta,
960 revinfo.p1,
989 revinfo.p1,
961 revinfo.p2,
990 revinfo.p2,
962 revinfo.flags,
991 revinfo.flags,
963 revinfo.node,
992 revinfo.node,
964 )
993 )
965 return fulltext
994 return fulltext
966
995
967 def _builddeltadiff(self, base, revinfo, fh):
996 def _builddeltadiff(self, base, revinfo, fh):
968 revlog = self.revlog
997 revlog = self.revlog
969 t = self.buildtext(revinfo, fh)
998 t = self.buildtext(revinfo, fh)
970 if revlog.iscensored(base):
999 if revlog.iscensored(base):
971 # deltas based on a censored revision must replace the
1000 # deltas based on a censored revision must replace the
972 # full content in one patch, so delta works everywhere
1001 # full content in one patch, so delta works everywhere
973 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
1002 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
974 delta = header + t
1003 delta = header + t
975 else:
1004 else:
976 ptext = revlog.rawdata(base, _df=fh)
1005 ptext = revlog.rawdata(base, _df=fh)
977 delta = mdiff.textdiff(ptext, t)
1006 delta = mdiff.textdiff(ptext, t)
978
1007
979 return delta
1008 return delta
980
1009
981 def _builddeltainfo(self, revinfo, base, fh):
1010 def _builddeltainfo(self, revinfo, base, fh):
982 # can we use the cached delta?
1011 # can we use the cached delta?
983 revlog = self.revlog
1012 revlog = self.revlog
984 debug_search = self._write_debug is not None and self._debug_search
1013 debug_search = self._write_debug is not None and self._debug_search
985 chainbase = revlog.chainbase(base)
1014 chainbase = revlog.chainbase(base)
986 if revlog._generaldelta:
1015 if revlog._generaldelta:
987 deltabase = base
1016 deltabase = base
988 else:
1017 else:
989 deltabase = chainbase
1018 deltabase = chainbase
990 snapshotdepth = None
1019 snapshotdepth = None
991 if revlog._sparserevlog and deltabase == nullrev:
1020 if revlog._sparserevlog and deltabase == nullrev:
992 snapshotdepth = 0
1021 snapshotdepth = 0
993 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
1022 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
994 # A delta chain should always be one full snapshot,
1023 # A delta chain should always be one full snapshot,
995 # zero or more semi-snapshots, and zero or more deltas
1024 # zero or more semi-snapshots, and zero or more deltas
996 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
1025 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
997 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
1026 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
998 snapshotdepth = len(revlog._deltachain(deltabase)[0])
1027 snapshotdepth = len(revlog._deltachain(deltabase)[0])
999 delta = None
1028 delta = None
1000 if revinfo.cachedelta:
1029 if revinfo.cachedelta:
1001 cachebase, cachediff = revinfo.cachedelta
1030 cachebase, cachediff = revinfo.cachedelta
1002 # check if the diff still applies
1031 # check if the diff still applies
1003 currentbase = cachebase
1032 currentbase = cachebase
1004 while (
1033 while (
1005 currentbase != nullrev
1034 currentbase != nullrev
1006 and currentbase != base
1035 and currentbase != base
1007 and self.revlog.length(currentbase) == 0
1036 and self.revlog.length(currentbase) == 0
1008 ):
1037 ):
1009 currentbase = self.revlog.deltaparent(currentbase)
1038 currentbase = self.revlog.deltaparent(currentbase)
1010 if self.revlog._lazydelta and currentbase == base:
1039 if self.revlog._lazydelta and currentbase == base:
1011 delta = revinfo.cachedelta[1]
1040 delta = revinfo.cachedelta[1]
1012 if delta is None:
1041 if delta is None:
1013 delta = self._builddeltadiff(base, revinfo, fh)
1042 delta = self._builddeltadiff(base, revinfo, fh)
1014 if debug_search:
1043 if debug_search:
1015 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1044 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1016 msg %= len(delta)
1045 msg %= len(delta)
1017 self._write_debug(msg)
1046 self._write_debug(msg)
1018 # snapshotdepth needs to be neither None nor 0 (a full snapshot)
1047 # snapshotdepth needs to be neither None nor 0 (a full snapshot)
1019 if revlog.upperboundcomp is not None and snapshotdepth:
1048 if revlog.upperboundcomp is not None and snapshotdepth:
1020 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1049 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1021 snapshotlimit = revinfo.textlen >> snapshotdepth
1050 snapshotlimit = revinfo.textlen >> snapshotdepth
1022 if debug_search:
1051 if debug_search:
1023 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1052 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1024 msg %= lowestrealisticdeltalen
1053 msg %= lowestrealisticdeltalen
1025 self._write_debug(msg)
1054 self._write_debug(msg)
1026 if snapshotlimit < lowestrealisticdeltalen:
1055 if snapshotlimit < lowestrealisticdeltalen:
1027 if debug_search:
1056 if debug_search:
1028 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1057 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1029 self._write_debug(msg)
1058 self._write_debug(msg)
1030 return None
1059 return None
1031 if revlog.length(base) < lowestrealisticdeltalen:
1060 if revlog.length(base) < lowestrealisticdeltalen:
1032 if debug_search:
1061 if debug_search:
1033 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1062 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1034 self._write_debug(msg)
1063 self._write_debug(msg)
1035 return None
1064 return None
1036 header, data = revlog.compress(delta)
1065 header, data = revlog.compress(delta)
1037 deltalen = len(header) + len(data)
1066 deltalen = len(header) + len(data)
1038 offset = revlog.end(len(revlog) - 1)
1067 offset = revlog.end(len(revlog) - 1)
1039 dist = deltalen + offset - revlog.start(chainbase)
1068 dist = deltalen + offset - revlog.start(chainbase)
1040 chainlen, compresseddeltalen = revlog._chaininfo(base)
1069 chainlen, compresseddeltalen = revlog._chaininfo(base)
1041 chainlen += 1
1070 chainlen += 1
1042 compresseddeltalen += deltalen
1071 compresseddeltalen += deltalen
1043
1072
1044 return _deltainfo(
1073 return _deltainfo(
1045 dist,
1074 dist,
1046 deltalen,
1075 deltalen,
1047 (header, data),
1076 (header, data),
1048 deltabase,
1077 deltabase,
1049 chainbase,
1078 chainbase,
1050 chainlen,
1079 chainlen,
1051 compresseddeltalen,
1080 compresseddeltalen,
1052 snapshotdepth,
1081 snapshotdepth,
1053 )
1082 )
1054
1083
1055 def _fullsnapshotinfo(self, fh, revinfo, curr):
1084 def _fullsnapshotinfo(self, fh, revinfo, curr):
1056 rawtext = self.buildtext(revinfo, fh)
1085 rawtext = self.buildtext(revinfo, fh)
1057 data = self.revlog.compress(rawtext)
1086 data = self.revlog.compress(rawtext)
1058 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
1087 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
1059 deltabase = chainbase = curr
1088 deltabase = chainbase = curr
1060 snapshotdepth = 0
1089 snapshotdepth = 0
1061 chainlen = 1
1090 chainlen = 1
1062
1091
1063 return _deltainfo(
1092 return _deltainfo(
1064 dist,
1093 dist,
1065 deltalen,
1094 deltalen,
1066 data,
1095 data,
1067 deltabase,
1096 deltabase,
1068 chainbase,
1097 chainbase,
1069 chainlen,
1098 chainlen,
1070 compresseddeltalen,
1099 compresseddeltalen,
1071 snapshotdepth,
1100 snapshotdepth,
1072 )
1101 )
1073
1102
1074 def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
1103 def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
1075 """Find an acceptable delta against a candidate revision
1104 """Find an acceptable delta against a candidate revision
1076
1105
1077 revinfo: information about the revision (instance of _revisioninfo)
1106 revinfo: information about the revision (instance of _revisioninfo)
1078 fh: file handle to either the .i or the .d revlog file,
1107 fh: file handle to either the .i or the .d revlog file,
1079 depending on whether it is inlined or not
1108 depending on whether it is inlined or not
1080
1109
1081 Returns the first acceptable candidate revision, as ordered by
1110 Returns the first acceptable candidate revision, as ordered by
1082 _candidategroups
1111 _candidategroups
1083
1112
1084 If no suitable deltabase is found, we return delta info for a full
1113 If no suitable deltabase is found, we return delta info for a full
1085 snapshot.
1114 snapshot.
1086
1115
1087 `excluded_bases` is an optional set of revisions that cannot be used as
1116 `excluded_bases` is an optional set of revisions that cannot be used as
1088 a delta base. Use this to recompute deltas suitable in a censor or strip
1117 a delta base. Use this to recompute deltas suitable in a censor or strip
1089 context.
1118 context.
1090 """
1119 """
1091 if target_rev is None:
1120 if target_rev is None:
1092 target_rev = len(self.revlog)
1121 target_rev = len(self.revlog)
1093
1122
1094 if not revinfo.textlen:
1123 if not revinfo.textlen:
1095 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1124 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1096
1125
1097 if excluded_bases is None:
1126 if excluded_bases is None:
1098 excluded_bases = set()
1127 excluded_bases = set()
1099
1128
1100 # no delta for flag processor revisions (see "candelta" for why)
1129 # no delta for flag processor revisions (see "candelta" for why)
1101 # not calling candelta since only one revision needs testing, and to
1130 # not calling candelta since only one revision needs testing, and to
1102 # avoid the overhead of fetching flags again.
1131 # avoid the overhead of fetching flags again.
1103 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1132 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1104 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1133 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1105
1134
1106 if self._write_debug is not None:
1135 gather_debug = (
1136 self._write_debug is not None or self._debug_info is not None
1137 )
1138 debug_search = self._write_debug is not None and self._debug_search
1139
1140 if gather_debug:
1107 start = util.timer()
1141 start = util.timer()
1108
1142
1109 debug_search = self._write_debug is not None and self._debug_search
1110
1111 # count the number of different deltas we tried (for debug purposes)
1143 # count the number of different deltas we tried (for debug purposes)
1112 dbg_try_count = 0
1144 dbg_try_count = 0
1113 # count the number of "search round" we did. (for debug purpose)
1145 # count the number of "search round" we did. (for debug purpose)
1114 dbg_try_rounds = 0
1146 dbg_try_rounds = 0
1115 dbg_type = b'unknown'
1147 dbg_type = b'unknown'
1116
1148
1117 cachedelta = revinfo.cachedelta
1149 cachedelta = revinfo.cachedelta
1118 p1 = revinfo.p1
1150 p1 = revinfo.p1
1119 p2 = revinfo.p2
1151 p2 = revinfo.p2
1120 revlog = self.revlog
1152 revlog = self.revlog
1121
1153
1122 deltainfo = None
1154 deltainfo = None
1123 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
1155 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
1124
1156
1125 if self._write_debug is not None:
1157 if gather_debug:
1126 if p1r != nullrev:
1158 if p1r != nullrev:
1127 p1_chain_len = revlog._chaininfo(p1r)[0]
1159 p1_chain_len = revlog._chaininfo(p1r)[0]
1128 else:
1160 else:
1129 p1_chain_len = -1
1161 p1_chain_len = -1
1130 if p2r != nullrev:
1162 if p2r != nullrev:
1131 p2_chain_len = revlog._chaininfo(p2r)[0]
1163 p2_chain_len = revlog._chaininfo(p2r)[0]
1132 else:
1164 else:
1133 p2_chain_len = -1
1165 p2_chain_len = -1
1134 if debug_search:
1166 if debug_search:
1135 msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
1167 msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
1136 msg %= target_rev
1168 msg %= target_rev
1137 self._write_debug(msg)
1169 self._write_debug(msg)
1138
1170
1139 groups = _candidategroups(
1171 groups = _candidategroups(
1140 self.revlog, revinfo.textlen, p1r, p2r, cachedelta
1172 self.revlog, revinfo.textlen, p1r, p2r, cachedelta
1141 )
1173 )
1142 candidaterevs = next(groups)
1174 candidaterevs = next(groups)
1143 while candidaterevs is not None:
1175 while candidaterevs is not None:
1144 dbg_try_rounds += 1
1176 dbg_try_rounds += 1
1145 if debug_search:
1177 if debug_search:
1146 prev = None
1178 prev = None
1147 if deltainfo is not None:
1179 if deltainfo is not None:
1148 prev = deltainfo.base
1180 prev = deltainfo.base
1149
1181
1150 if (
1182 if (
1151 cachedelta is not None
1183 cachedelta is not None
1152 and len(candidaterevs) == 1
1184 and len(candidaterevs) == 1
1153 and cachedelta[0] in candidaterevs
1185 and cachedelta[0] in candidaterevs
1154 ):
1186 ):
1155 round_type = b"cached-delta"
1187 round_type = b"cached-delta"
1156 elif p1 in candidaterevs or p2 in candidaterevs:
1188 elif p1 in candidaterevs or p2 in candidaterevs:
1157 round_type = b"parents"
1189 round_type = b"parents"
1158 elif prev is not None and all(c < prev for c in candidaterevs):
1190 elif prev is not None and all(c < prev for c in candidaterevs):
1159 round_type = b"refine-down"
1191 round_type = b"refine-down"
1160 elif prev is not None and all(c > prev for c in candidaterevs):
1192 elif prev is not None and all(c > prev for c in candidaterevs):
1161 round_type = b"refine-up"
1193 round_type = b"refine-up"
1162 else:
1194 else:
1163 round_type = b"search-down"
1195 round_type = b"search-down"
1164 msg = b"DBG-DELTAS-SEARCH: ROUND #%d - %d candidates - %s\n"
1196 msg = b"DBG-DELTAS-SEARCH: ROUND #%d - %d candidates - %s\n"
1165 msg %= (dbg_try_rounds, len(candidaterevs), round_type)
1197 msg %= (dbg_try_rounds, len(candidaterevs), round_type)
1166 self._write_debug(msg)
1198 self._write_debug(msg)
1167 nominateddeltas = []
1199 nominateddeltas = []
1168 if deltainfo is not None:
1200 if deltainfo is not None:
1169 if debug_search:
1201 if debug_search:
1170 msg = (
1202 msg = (
1171 b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
1203 b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
1172 )
1204 )
1173 msg %= (deltainfo.base, deltainfo.deltalen)
1205 msg %= (deltainfo.base, deltainfo.deltalen)
1174 self._write_debug(msg)
1206 self._write_debug(msg)
1175 # if we already found a good delta,
1207 # if we already found a good delta,
1176 # challenge it against refined candidates
1208 # challenge it against refined candidates
1177 nominateddeltas.append(deltainfo)
1209 nominateddeltas.append(deltainfo)
1178 for candidaterev in candidaterevs:
1210 for candidaterev in candidaterevs:
1179 if debug_search:
1211 if debug_search:
1180 msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
1212 msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
1181 msg %= candidaterev
1213 msg %= candidaterev
1182 self._write_debug(msg)
1214 self._write_debug(msg)
1183 candidate_type = None
1215 candidate_type = None
1184 if candidaterev == p1:
1216 if candidaterev == p1:
1185 candidate_type = b"p1"
1217 candidate_type = b"p1"
1186 elif candidaterev == p2:
1218 elif candidaterev == p2:
1187 candidate_type = b"p2"
1219 candidate_type = b"p2"
1188 elif self.revlog.issnapshot(candidaterev):
1220 elif self.revlog.issnapshot(candidaterev):
1189 candidate_type = b"snapshot-%d"
1221 candidate_type = b"snapshot-%d"
1190 candidate_type %= self.revlog.snapshotdepth(
1222 candidate_type %= self.revlog.snapshotdepth(
1191 candidaterev
1223 candidaterev
1192 )
1224 )
1193
1225
1194 if candidate_type is not None:
1226 if candidate_type is not None:
1195 msg = b"DBG-DELTAS-SEARCH: type=%s\n"
1227 msg = b"DBG-DELTAS-SEARCH: type=%s\n"
1196 msg %= candidate_type
1228 msg %= candidate_type
1197 self._write_debug(msg)
1229 self._write_debug(msg)
1198 msg = b"DBG-DELTAS-SEARCH: size=%d\n"
1230 msg = b"DBG-DELTAS-SEARCH: size=%d\n"
1199 msg %= self.revlog.length(candidaterev)
1231 msg %= self.revlog.length(candidaterev)
1200 self._write_debug(msg)
1232 self._write_debug(msg)
1201 msg = b"DBG-DELTAS-SEARCH: base=%d\n"
1233 msg = b"DBG-DELTAS-SEARCH: base=%d\n"
1202 msg %= self.revlog.deltaparent(candidaterev)
1234 msg %= self.revlog.deltaparent(candidaterev)
1203 self._write_debug(msg)
1235 self._write_debug(msg)
1204 if candidaterev in excluded_bases:
1236 if candidaterev in excluded_bases:
1205 if debug_search:
1237 if debug_search:
1206 msg = b"DBG-DELTAS-SEARCH: EXCLUDED\n"
1238 msg = b"DBG-DELTAS-SEARCH: EXCLUDED\n"
1207 self._write_debug(msg)
1239 self._write_debug(msg)
1208 continue
1240 continue
1209 if candidaterev >= target_rev:
1241 if candidaterev >= target_rev:
1210 if debug_search:
1242 if debug_search:
1211 msg = b"DBG-DELTAS-SEARCH: TOO-HIGH\n"
1243 msg = b"DBG-DELTAS-SEARCH: TOO-HIGH\n"
1212 self._write_debug(msg)
1244 self._write_debug(msg)
1213 continue
1245 continue
1214 dbg_try_count += 1
1246 dbg_try_count += 1
1215
1247
1216 if debug_search:
1248 if debug_search:
1217 delta_start = util.timer()
1249 delta_start = util.timer()
1218 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
1250 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
1219 if debug_search:
1251 if debug_search:
1220 delta_end = util.timer()
1252 delta_end = util.timer()
1221 msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
1253 msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
1222 msg %= delta_end - delta_start
1254 msg %= delta_end - delta_start
1223 self._write_debug(msg)
1255 self._write_debug(msg)
1224 if candidatedelta is not None:
1256 if candidatedelta is not None:
1225 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1257 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1226 if debug_search:
1258 if debug_search:
1227 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
1259 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
1228 msg %= candidatedelta.deltalen
1260 msg %= candidatedelta.deltalen
1229 self._write_debug(msg)
1261 self._write_debug(msg)
1230 nominateddeltas.append(candidatedelta)
1262 nominateddeltas.append(candidatedelta)
1231 elif debug_search:
1263 elif debug_search:
1232 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
1264 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
1233 msg %= candidatedelta.deltalen
1265 msg %= candidatedelta.deltalen
1234 self._write_debug(msg)
1266 self._write_debug(msg)
1235 elif debug_search:
1267 elif debug_search:
1236 msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
1268 msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
1237 self._write_debug(msg)
1269 self._write_debug(msg)
1238 if nominateddeltas:
1270 if nominateddeltas:
1239 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
1271 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
1240 if deltainfo is not None:
1272 if deltainfo is not None:
1241 candidaterevs = groups.send(deltainfo.base)
1273 candidaterevs = groups.send(deltainfo.base)
1242 else:
1274 else:
1243 candidaterevs = next(groups)
1275 candidaterevs = next(groups)
1244
1276
1245 if deltainfo is None:
1277 if deltainfo is None:
1246 dbg_type = b"full"
1278 dbg_type = b"full"
1247 deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
1279 deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
1248 elif deltainfo.snapshotdepth: # pytype: disable=attribute-error
1280 elif deltainfo.snapshotdepth: # pytype: disable=attribute-error
1249 dbg_type = b"snapshot"
1281 dbg_type = b"snapshot"
1250 else:
1282 else:
1251 dbg_type = b"delta"
1283 dbg_type = b"delta"
1252
1284
1253 if self._write_debug is not None:
1285 if gather_debug:
1254 end = util.timer()
1286 end = util.timer()
1255 assert deltainfo is not None # please pytype
1256 used_cached = (
1287 used_cached = (
1257 cachedelta is not None
1288 cachedelta is not None
1258 and dbg_try_rounds == 1
1289 and dbg_try_rounds == 1
1259 and dbg_try_count == 1
1290 and dbg_try_count == 1
1260 and deltainfo.base == cachedelta[0]
1291 and deltainfo.base == cachedelta[0]
1261 )
1292 )
1262 dbg = {
1293 dbg = {
1263 'duration': end - start,
1294 'duration': end - start,
1264 'revision': target_rev,
1295 'revision': target_rev,
1265 'delta-base': deltainfo.base,
1296 'delta-base': deltainfo.base, # pytype: disable=attribute-error
1266 'search_round_count': dbg_try_rounds,
1297 'search_round_count': dbg_try_rounds,
1267 'using-cached-base': used_cached,
1298 'using-cached-base': used_cached,
1268 'delta_try_count': dbg_try_count,
1299 'delta_try_count': dbg_try_count,
1269 'type': dbg_type,
1300 'type': dbg_type,
1270 'p1-chain-len': p1_chain_len,
1301 'p1-chain-len': p1_chain_len,
1271 'p2-chain-len': p2_chain_len,
1302 'p2-chain-len': p2_chain_len,
1272 }
1303 }
1273 if (
1304 if (
1274 deltainfo.snapshotdepth # pytype: disable=attribute-error
1305 deltainfo.snapshotdepth # pytype: disable=attribute-error
1275 is not None
1306 is not None
1276 ):
1307 ):
1277 dbg[
1308 dbg[
1278 'snapshot-depth'
1309 'snapshot-depth'
1279 ] = deltainfo.snapshotdepth # pytype: disable=attribute-error
1310 ] = deltainfo.snapshotdepth # pytype: disable=attribute-error
1280 else:
1311 else:
1281 dbg['snapshot-depth'] = 0
1312 dbg['snapshot-depth'] = 0
1282 target_revlog = b"UNKNOWN"
1313 target_revlog = b"UNKNOWN"
1283 target_type = self.revlog.target[0]
1314 target_type = self.revlog.target[0]
1284 target_key = self.revlog.target[1]
1315 target_key = self.revlog.target[1]
1285 if target_type == KIND_CHANGELOG:
1316 if target_type == KIND_CHANGELOG:
1286 target_revlog = b'CHANGELOG:'
1317 target_revlog = b'CHANGELOG:'
1287 elif target_type == KIND_MANIFESTLOG:
1318 elif target_type == KIND_MANIFESTLOG:
1288 target_revlog = b'MANIFESTLOG:'
1319 target_revlog = b'MANIFESTLOG:'
1289 if target_key:
1320 if target_key:
1290 target_revlog += b'%s:' % target_key
1321 target_revlog += b'%s:' % target_key
1291 elif target_type == KIND_FILELOG:
1322 elif target_type == KIND_FILELOG:
1292 target_revlog = b'FILELOG:'
1323 target_revlog = b'FILELOG:'
1293 if target_key:
1324 if target_key:
1294 target_revlog += b'%s:' % target_key
1325 target_revlog += b'%s:' % target_key
1295 dbg['target-revlog'] = target_revlog
1326 dbg['target-revlog'] = target_revlog
1296
1327
1297 msg = (
1328 if self._debug_info is not None:
1298 b"DBG-DELTAS:"
1329 self._debug_info.append(dbg)
1299 b" %-12s"
1330
1300 b" rev=%d:"
1331 if self._write_debug is not None:
1301 b" delta-base=%d"
1332 msg = (
1302 b" is-cached=%d"
1333 b"DBG-DELTAS:"
1303 b" - search-rounds=%d"
1334 b" %-12s"
1304 b" try-count=%d"
1335 b" rev=%d:"
1305 b" - delta-type=%-6s"
1336 b" delta-base=%d"
1306 b" snap-depth=%d"
1337 b" is-cached=%d"
1307 b" - p1-chain-length=%d"
1338 b" - search-rounds=%d"
1308 b" p2-chain-length=%d"
1339 b" try-count=%d"
1309 b" - duration=%f"
1340 b" - delta-type=%-6s"
1310 b"\n"
1341 b" snap-depth=%d"
1311 )
1342 b" - p1-chain-length=%d"
1312 msg %= (
1343 b" p2-chain-length=%d"
1313 dbg["target-revlog"],
1344 b" - duration=%f"
1314 dbg["revision"],
1345 b"\n"
1315 dbg["delta-base"],
1346 )
1316 dbg["using-cached-base"],
1347 msg %= (
1317 dbg["search_round_count"],
1348 dbg["target-revlog"],
1318 dbg["delta_try_count"],
1349 dbg["revision"],
1319 dbg["type"],
1350 dbg["delta-base"],
1320 dbg["snapshot-depth"],
1351 dbg["using-cached-base"],
1321 dbg["p1-chain-len"],
1352 dbg["search_round_count"],
1322 dbg["p2-chain-len"],
1353 dbg["delta_try_count"],
1323 dbg["duration"],
1354 dbg["type"],
1324 )
1355 dbg["snapshot-depth"],
1325 self._write_debug(msg)
1356 dbg["p1-chain-len"],
1357 dbg["p2-chain-len"],
1358 dbg["duration"],
1359 )
1360 self._write_debug(msg)
1326 return deltainfo
1361 return deltainfo
1327
1362
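(Editorial aid: the loop above drives a generator protocol. _candidategroups yields batches of candidate bases, and sending the current winner back into it requests a refined batch around that base; the new gather_debug plumbing then feeds one stats dict to up to two sinks. A condensed sketch with illustrative stand-in names follows; only the next()/send() shape and the two-sink dispatch are taken from the code above.)

import time


def find_best_delta(groups, build_delta, is_good,
                    write_debug=None, debug_info=None):
    # Gather stats when either sink is configured (mirrors gather_debug).
    gather_debug = write_debug is not None or debug_info is not None
    start = time.monotonic() if gather_debug else 0.0
    rounds = tries = 0
    best = None
    candidates = next(groups)
    while candidates is not None:
        rounds += 1
        nominated = [best] if best is not None else []
        for rev in candidates:
            tries += 1
            delta = build_delta(rev)
            if delta is not None and is_good(delta):
                nominated.append(delta)
        if nominated:
            best = min(nominated, key=lambda d: d.deltalen)
        if best is not None:
            candidates = groups.send(best.base)  # refine around the winner
        else:
            candidates = next(groups)  # no winner yet; try the next batch
    if gather_debug:
        dbg = {
            'duration': time.monotonic() - start,
            'search_round_count': rounds,
            'delta_try_count': tries,
        }
        if debug_info is not None:  # raw dicts, aggregated by the caller
            debug_info.append(dbg)
        if write_debug is not None:  # immediate human-readable line
            msg = b"DBG-DELTAS: search-rounds=%d try-count=%d duration=%f\n"
            write_debug(msg % (rounds, tries, dbg['duration']))
    return best

The second sink is the point of this changeset: a caller such as the new unbundle statistics code can hand in a list, let the dicts accumulate, and print one aggregated summary instead of one DBG-DELTAS line per revision.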
1328
1363
1329 def delta_compression(default_compression_header, deltainfo):
1364 def delta_compression(default_compression_header, deltainfo):
1330 """return (COMPRESSION_MODE, deltainfo)
1365 """return (COMPRESSION_MODE, deltainfo)
1331
1366
1332 used by revlog v2+ format to dispatch between PLAIN and DEFAULT
1367 used by revlog v2+ format to dispatch between PLAIN and DEFAULT
1333 compression.
1368 compression.
1334 """
1369 """
1335 h, d = deltainfo.data
1370 h, d = deltainfo.data
1336 compression_mode = COMP_MODE_INLINE
1371 compression_mode = COMP_MODE_INLINE
1337 if not h and not d:
1372 if not h and not d:
1338 # no data to store at all... declare them uncompressed
1373 # no data to store at all... declare them uncompressed
1339 compression_mode = COMP_MODE_PLAIN
1374 compression_mode = COMP_MODE_PLAIN
1340 elif not h:
1375 elif not h:
1341 t = d[0:1]
1376 t = d[0:1]
1342 if t == b'\0':
1377 if t == b'\0':
1343 compression_mode = COMP_MODE_PLAIN
1378 compression_mode = COMP_MODE_PLAIN
1344 elif t == default_compression_header:
1379 elif t == default_compression_header:
1345 compression_mode = COMP_MODE_DEFAULT
1380 compression_mode = COMP_MODE_DEFAULT
1346 elif h == b'u':
1381 elif h == b'u':
1347 # we have a more efficient way to declare uncompressed
1382 # we have a more efficient way to declare uncompressed
1348 h = b''
1383 h = b''
1349 compression_mode = COMP_MODE_PLAIN
1384 compression_mode = COMP_MODE_PLAIN
1350 deltainfo = drop_u_compression(deltainfo)
1385 deltainfo = drop_u_compression(deltainfo)
1351 return compression_mode, deltainfo
1386 return compression_mode, deltainfo
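(Editorial aid: the dispatch above keys off the first byte of the compressed payload. Below is a standalone rendering; the COMP_MODE_* values are stand-ins for constants imported elsewhere in this module, and the real function additionally strips the b'u' header from the stored data via drop_u_compression.)

# Stand-in values; the real constants come from the revlog constants module.
COMP_MODE_PLAIN = 0    # chunk stored as-is, no compression header needed
COMP_MODE_DEFAULT = 1  # readable with the revlog's default engine
COMP_MODE_INLINE = 2   # keep the explicit compression header inline


def classify_compression(header, data, default_compression_header):
    """Mirror delta_compression's decision tree for a (header, data) pair."""
    if not header and not data:
        return COMP_MODE_PLAIN  # nothing to store at all
    if not header:
        first = data[0:1]
        if first == b'\0':
            return COMP_MODE_PLAIN  # a leading NUL marks an as-is chunk
        if first == default_compression_header:
            return COMP_MODE_DEFAULT  # matches the default compressor
    elif header == b'u':
        return COMP_MODE_PLAIN  # legacy explicit "uncompressed" header
    return COMP_MODE_INLINE

Anything that falls through keeps its header inline, the safe default for any other compression engine.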
@@ -1,1083 +1,1127 b''
1 Setting up test
1 Setting up test
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo 0 > afile
5 $ echo 0 > afile
6 $ hg add afile
6 $ hg add afile
7 $ hg commit -m "0.0"
7 $ hg commit -m "0.0"
8 $ echo 1 >> afile
8 $ echo 1 >> afile
9 $ hg commit -m "0.1"
9 $ hg commit -m "0.1"
10 $ echo 2 >> afile
10 $ echo 2 >> afile
11 $ hg commit -m "0.2"
11 $ hg commit -m "0.2"
12 $ echo 3 >> afile
12 $ echo 3 >> afile
13 $ hg commit -m "0.3"
13 $ hg commit -m "0.3"
14 $ hg update -C 0
14 $ hg update -C 0
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 $ echo 1 >> afile
16 $ echo 1 >> afile
17 $ hg commit -m "1.1"
17 $ hg commit -m "1.1"
18 created new head
18 created new head
19 $ echo 2 >> afile
19 $ echo 2 >> afile
20 $ hg commit -m "1.2"
20 $ hg commit -m "1.2"
21 $ echo "a line" > fred
21 $ echo "a line" > fred
22 $ echo 3 >> afile
22 $ echo 3 >> afile
23 $ hg add fred
23 $ hg add fred
24 $ hg commit -m "1.3"
24 $ hg commit -m "1.3"
25 $ hg mv afile adifferentfile
25 $ hg mv afile adifferentfile
26 $ hg commit -m "1.3m"
26 $ hg commit -m "1.3m"
27 $ hg update -C 3
27 $ hg update -C 3
28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 $ hg mv afile anotherfile
29 $ hg mv afile anotherfile
30 $ hg commit -m "0.3m"
30 $ hg commit -m "0.3m"
31 $ hg verify
31 $ hg verify
32 checking changesets
32 checking changesets
33 checking manifests
33 checking manifests
34 crosschecking files in changesets and manifests
34 crosschecking files in changesets and manifests
35 checking files
35 checking files
36 checked 9 changesets with 7 changes to 4 files
36 checked 9 changesets with 7 changes to 4 files
37 $ cd ..
37 $ cd ..
38 $ hg init empty
38 $ hg init empty
39
39
40 Bundle and phase
40 Bundle and phase
41
41
42 $ hg -R test phase --force --secret 0
42 $ hg -R test phase --force --secret 0
43 $ hg -R test bundle phase.hg empty
43 $ hg -R test bundle phase.hg empty
44 searching for changes
44 searching for changes
45 no changes found (ignored 9 secret changesets)
45 no changes found (ignored 9 secret changesets)
46 [1]
46 [1]
47 $ hg -R test phase --draft -r 'head()'
47 $ hg -R test phase --draft -r 'head()'
48
48
49 Bundle --all
49 Bundle --all
50
50
51 $ hg -R test bundle --all all.hg
51 $ hg -R test bundle --all all.hg
52 9 changesets found
52 9 changesets found
53
53
54 Bundle test to full.hg
54 Bundle test to full.hg
55
55
56 $ hg -R test bundle full.hg empty
56 $ hg -R test bundle full.hg empty
57 searching for changes
57 searching for changes
58 9 changesets found
58 9 changesets found
59
59
60 Unbundle full.hg in test
60 Unbundle full.hg in test
61
61
62 $ hg -R test unbundle full.hg
62 $ hg -R test unbundle full.hg
63 adding changesets
63 adding changesets
64 adding manifests
64 adding manifests
65 adding file changes
65 adding file changes
66 added 0 changesets with 0 changes to 4 files
66 added 0 changesets with 0 changes to 4 files
67 (run 'hg update' to get a working copy)
67 (run 'hg update' to get a working copy)
68
68
69 Verify empty
69 Verify empty
70
70
71 $ hg -R empty heads
71 $ hg -R empty heads
72 [1]
72 [1]
73 $ hg -R empty verify
73 $ hg -R empty verify
74 checking changesets
74 checking changesets
75 checking manifests
75 checking manifests
76 crosschecking files in changesets and manifests
76 crosschecking files in changesets and manifests
77 checking files
77 checking files
78 checked 0 changesets with 0 changes to 0 files
78 checked 0 changesets with 0 changes to 0 files
79
79
80 #if repobundlerepo
80 #if repobundlerepo
81
81
82 Pull full.hg into test (using --cwd)
82 Pull full.hg into test (using --cwd)
83
83
84 $ hg --cwd test pull ../full.hg
84 $ hg --cwd test pull ../full.hg
85 pulling from ../full.hg
85 pulling from ../full.hg
86 searching for changes
86 searching for changes
87 no changes found
87 no changes found
88
88
89 Verify that there are no leaked temporary files after pull (issue2797)
89 Verify that there are no leaked temporary files after pull (issue2797)
90
90
91 $ ls test/.hg | grep .hg10un
91 $ ls test/.hg | grep .hg10un
92 [1]
92 [1]
93
93
94 Pull full.hg into empty (using --cwd)
94 Pull full.hg into empty (using --cwd)
95
95
96 $ hg --cwd empty pull ../full.hg
96 $ hg --cwd empty pull ../full.hg
97 pulling from ../full.hg
97 pulling from ../full.hg
98 requesting all changes
98 requesting all changes
99 adding changesets
99 adding changesets
100 adding manifests
100 adding manifests
101 adding file changes
101 adding file changes
102 added 9 changesets with 7 changes to 4 files (+1 heads)
102 added 9 changesets with 7 changes to 4 files (+1 heads)
103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
104 (run 'hg heads' to see heads, 'hg merge' to merge)
104 (run 'hg heads' to see heads, 'hg merge' to merge)
105
105
106 Rollback empty
106 Rollback empty
107
107
108 $ hg -R empty rollback
108 $ hg -R empty rollback
109 repository tip rolled back to revision -1 (undo pull)
109 repository tip rolled back to revision -1 (undo pull)
110
110
111 Pull full.hg into empty again (using --cwd)
111 Pull full.hg into empty again (using --cwd)
112
112
113 $ hg --cwd empty pull ../full.hg
113 $ hg --cwd empty pull ../full.hg
114 pulling from ../full.hg
114 pulling from ../full.hg
115 requesting all changes
115 requesting all changes
116 adding changesets
116 adding changesets
117 adding manifests
117 adding manifests
118 adding file changes
118 adding file changes
119 added 9 changesets with 7 changes to 4 files (+1 heads)
119 added 9 changesets with 7 changes to 4 files (+1 heads)
120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
121 (run 'hg heads' to see heads, 'hg merge' to merge)
121 (run 'hg heads' to see heads, 'hg merge' to merge)
122
122
123 Pull full.hg into test (using -R)
123 Pull full.hg into test (using -R)
124
124
125 $ hg -R test pull full.hg
125 $ hg -R test pull full.hg
126 pulling from full.hg
126 pulling from full.hg
127 searching for changes
127 searching for changes
128 no changes found
128 no changes found
129
129
130 Pull full.hg into empty (using -R)
130 Pull full.hg into empty (using -R)
131
131
132 $ hg -R empty pull full.hg
132 $ hg -R empty pull full.hg
133 pulling from full.hg
133 pulling from full.hg
134 searching for changes
134 searching for changes
135 no changes found
135 no changes found
136
136
137 Rollback empty
137 Rollback empty
138
138
139 $ hg -R empty rollback
139 $ hg -R empty rollback
140 repository tip rolled back to revision -1 (undo pull)
140 repository tip rolled back to revision -1 (undo pull)
141
141
142 Pull full.hg into empty again (using -R)
142 Pull full.hg into empty again (using -R)
143
143
144 $ hg -R empty pull full.hg
144 $ hg -R empty pull full.hg
145 pulling from full.hg
145 pulling from full.hg
146 requesting all changes
146 requesting all changes
147 adding changesets
147 adding changesets
148 adding manifests
148 adding manifests
149 adding file changes
149 adding file changes
150 added 9 changesets with 7 changes to 4 files (+1 heads)
150 added 9 changesets with 7 changes to 4 files (+1 heads)
151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
152 (run 'hg heads' to see heads, 'hg merge' to merge)
152 (run 'hg heads' to see heads, 'hg merge' to merge)
153
153
154 Log -R full.hg in fresh empty
154 Log -R full.hg in fresh empty
155
155
156 $ rm -r empty
156 $ rm -r empty
157 $ hg init empty
157 $ hg init empty
158 $ cd empty
158 $ cd empty
159 $ hg -R bundle://../full.hg log
159 $ hg -R bundle://../full.hg log
160 changeset: 8:aa35859c02ea
160 changeset: 8:aa35859c02ea
161 tag: tip
161 tag: tip
162 parent: 3:eebf5a27f8ca
162 parent: 3:eebf5a27f8ca
163 user: test
163 user: test
164 date: Thu Jan 01 00:00:00 1970 +0000
164 date: Thu Jan 01 00:00:00 1970 +0000
165 summary: 0.3m
165 summary: 0.3m
166
166
167 changeset: 7:a6a34bfa0076
167 changeset: 7:a6a34bfa0076
168 user: test
168 user: test
169 date: Thu Jan 01 00:00:00 1970 +0000
169 date: Thu Jan 01 00:00:00 1970 +0000
170 summary: 1.3m
170 summary: 1.3m
171
171
172 changeset: 6:7373c1169842
172 changeset: 6:7373c1169842
173 user: test
173 user: test
174 date: Thu Jan 01 00:00:00 1970 +0000
174 date: Thu Jan 01 00:00:00 1970 +0000
175 summary: 1.3
175 summary: 1.3
176
176
177 changeset: 5:1bb50a9436a7
177 changeset: 5:1bb50a9436a7
178 user: test
178 user: test
179 date: Thu Jan 01 00:00:00 1970 +0000
179 date: Thu Jan 01 00:00:00 1970 +0000
180 summary: 1.2
180 summary: 1.2
181
181
182 changeset: 4:095197eb4973
182 changeset: 4:095197eb4973
183 parent: 0:f9ee2f85a263
183 parent: 0:f9ee2f85a263
184 user: test
184 user: test
185 date: Thu Jan 01 00:00:00 1970 +0000
185 date: Thu Jan 01 00:00:00 1970 +0000
186 summary: 1.1
186 summary: 1.1
187
187
188 changeset: 3:eebf5a27f8ca
188 changeset: 3:eebf5a27f8ca
189 user: test
189 user: test
190 date: Thu Jan 01 00:00:00 1970 +0000
190 date: Thu Jan 01 00:00:00 1970 +0000
191 summary: 0.3
191 summary: 0.3
192
192
193 changeset: 2:e38ba6f5b7e0
193 changeset: 2:e38ba6f5b7e0
194 user: test
194 user: test
195 date: Thu Jan 01 00:00:00 1970 +0000
195 date: Thu Jan 01 00:00:00 1970 +0000
196 summary: 0.2
196 summary: 0.2
197
197
198 changeset: 1:34c2bf6b0626
198 changeset: 1:34c2bf6b0626
199 user: test
199 user: test
200 date: Thu Jan 01 00:00:00 1970 +0000
200 date: Thu Jan 01 00:00:00 1970 +0000
201 summary: 0.1
201 summary: 0.1
202
202
203 changeset: 0:f9ee2f85a263
203 changeset: 0:f9ee2f85a263
204 user: test
204 user: test
205 date: Thu Jan 01 00:00:00 1970 +0000
205 date: Thu Jan 01 00:00:00 1970 +0000
206 summary: 0.0
206 summary: 0.0
207
207
208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
209
209
210 $ ls .hg
210 $ ls .hg
211 00changelog.i
211 00changelog.i
212 cache
212 cache
213 requires
213 requires
214 store
214 store
215 wcache
215 wcache
216
216
217 Pull ../full.hg into empty (with hook)
217 Pull ../full.hg into empty (with hook)
218
218
219 $ cat >> .hg/hgrc <<EOF
219 $ cat >> .hg/hgrc <<EOF
220 > [hooks]
220 > [hooks]
221 > changegroup = sh -c "printenv.py --line changegroup"
221 > changegroup = sh -c "printenv.py --line changegroup"
222 > EOF
222 > EOF
223
223
224 doesn't work (yet?)
224 doesn't work (yet?)
225 NOTE: msys is mangling the URL below
225 NOTE: msys is mangling the URL below
226
226
227 hg -R bundle://../full.hg verify
227 hg -R bundle://../full.hg verify
228
228
229 $ hg pull bundle://../full.hg
229 $ hg pull bundle://../full.hg
230 pulling from bundle:../full.hg
230 pulling from bundle:../full.hg
231 requesting all changes
231 requesting all changes
232 adding changesets
232 adding changesets
233 adding manifests
233 adding manifests
234 adding file changes
234 adding file changes
235 added 9 changesets with 7 changes to 4 files (+1 heads)
235 added 9 changesets with 7 changes to 4 files (+1 heads)
236 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
236 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
237 changegroup hook: HG_HOOKNAME=changegroup
237 changegroup hook: HG_HOOKNAME=changegroup
238 HG_HOOKTYPE=changegroup
238 HG_HOOKTYPE=changegroup
239 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
239 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
240 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
240 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
241 HG_SOURCE=pull
241 HG_SOURCE=pull
242 HG_TXNID=TXN:$ID$
242 HG_TXNID=TXN:$ID$
243 HG_TXNNAME=pull
243 HG_TXNNAME=pull
244 bundle:../full.hg (no-msys !)
244 bundle:../full.hg (no-msys !)
245 bundle;../full.hg (msys !)
245 bundle;../full.hg (msys !)
246 HG_URL=bundle:../full.hg (no-msys !)
246 HG_URL=bundle:../full.hg (no-msys !)
247 HG_URL=bundle;../full.hg (msys !)
247 HG_URL=bundle;../full.hg (msys !)
248
248
249 (run 'hg heads' to see heads, 'hg merge' to merge)
249 (run 'hg heads' to see heads, 'hg merge' to merge)
250
250
251 Rollback empty
251 Rollback empty
252
252
253 $ hg rollback
253 $ hg rollback
254 repository tip rolled back to revision -1 (undo pull)
254 repository tip rolled back to revision -1 (undo pull)
255 $ cd ..
255 $ cd ..
256
256
257 Log -R bundle:empty+full.hg
257 Log -R bundle:empty+full.hg
258
258
259 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
259 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
260 8 7 6 5 4 3 2 1 0
260 8 7 6 5 4 3 2 1 0
261
261
262 Pull full.hg into empty again (using -R; with hook)
262 Pull full.hg into empty again (using -R; with hook)
263
263
264 $ hg -R empty pull full.hg
264 $ hg -R empty pull full.hg
265 pulling from full.hg
265 pulling from full.hg
266 requesting all changes
266 requesting all changes
267 adding changesets
267 adding changesets
268 adding manifests
268 adding manifests
269 adding file changes
269 adding file changes
270 added 9 changesets with 7 changes to 4 files (+1 heads)
270 added 9 changesets with 7 changes to 4 files (+1 heads)
271 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
271 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
272 changegroup hook: HG_HOOKNAME=changegroup
272 changegroup hook: HG_HOOKNAME=changegroup
273 HG_HOOKTYPE=changegroup
273 HG_HOOKTYPE=changegroup
274 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
274 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
275 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
275 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
276 HG_SOURCE=pull
276 HG_SOURCE=pull
277 HG_TXNID=TXN:$ID$
277 HG_TXNID=TXN:$ID$
278 HG_TXNNAME=pull
278 HG_TXNNAME=pull
279 bundle:empty+full.hg
279 bundle:empty+full.hg
280 HG_URL=bundle:empty+full.hg
280 HG_URL=bundle:empty+full.hg
281
281
282 (run 'hg heads' to see heads, 'hg merge' to merge)
282 (run 'hg heads' to see heads, 'hg merge' to merge)
283
283
284 #endif
284 #endif
285
285
286 Cannot produce streaming clone bundles with "hg bundle"
286 Cannot produce streaming clone bundles with "hg bundle"
287
287
288 $ hg -R test bundle -t packed1 packed.hg
288 $ hg -R test bundle -t packed1 packed.hg
289 abort: packed bundles cannot be produced by "hg bundle"
289 abort: packed bundles cannot be produced by "hg bundle"
290 (use 'hg debugcreatestreamclonebundle')
290 (use 'hg debugcreatestreamclonebundle')
291 [10]
291 [10]
292
292
293 packed1 is produced properly
293 packed1 is produced properly
294
294
295
295
296 #if reporevlogstore rust
296 #if reporevlogstore rust
297
297
298 $ hg -R test debugcreatestreamclonebundle packed.hg
298 $ hg -R test debugcreatestreamclonebundle packed.hg
299 writing 2665 bytes for 6 files
299 writing 2665 bytes for 6 files
300 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
300 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
301
301
302 $ f -B 64 --size --sha1 --hexdump packed.hg
302 $ f -B 64 --size --sha1 --hexdump packed.hg
303 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
303 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
304 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
304 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
305 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
305 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
306 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
306 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
307 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
307 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
308 $ hg debugbundle --spec packed.hg
308 $ hg debugbundle --spec packed.hg
309 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
309 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
310 #endif
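(Editorial reading aid for the hexdumps above, not part of the test: the layout appears to be a 6-byte magic, two big-endian 64-bit counters, and a length-prefixed requirements string; the counters match the "6 files" and "2665 bytes" reported by the command. A best-effort parser under those inferred assumptions, an illustration rather than a normative description of the packed1 format:)

import struct


def read_packed1_header(fh):
    # Inferred layout: magic, file count (u64 BE), byte count (u64 BE),
    # requirements length (u16 BE), then the requirements themselves.
    magic = fh.read(6)
    assert magic == b'HGS1UN'
    filecount, bytecount = struct.unpack('>QQ', fh.read(16))
    (reqlen,) = struct.unpack('>H', fh.read(2))
    # Both dumps suggest the length includes a trailing NUL; strip it.
    requirements = fh.read(reqlen).rstrip(b'\0').split(b',')
    return filecount, bytecount, requirements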
310 #endif
311
311
312 #if reporevlogstore no-rust zstd
312 #if reporevlogstore no-rust zstd
313
313
314 $ hg -R test debugcreatestreamclonebundle packed.hg
314 $ hg -R test debugcreatestreamclonebundle packed.hg
315 writing 2665 bytes for 6 files
315 writing 2665 bytes for 6 files
316 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
316 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
317
317
318 $ f -B 64 --size --sha1 --hexdump packed.hg
318 $ f -B 64 --size --sha1 --hexdump packed.hg
319 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
319 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
320 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
320 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
321 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
321 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
322 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
322 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
323 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
323 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
324 $ hg debugbundle --spec packed.hg
324 $ hg debugbundle --spec packed.hg
325 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
325 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
326 #endif
326 #endif
327
327
328 #if reporevlogstore no-rust no-zstd
328 #if reporevlogstore no-rust no-zstd
329
329
330 $ hg -R test debugcreatestreamclonebundle packed.hg
330 $ hg -R test debugcreatestreamclonebundle packed.hg
331 writing 2664 bytes for 6 files
331 writing 2664 bytes for 6 files
332 bundle requirements: generaldelta, revlogv1, sparserevlog
332 bundle requirements: generaldelta, revlogv1, sparserevlog
333
333
334 $ f -B 64 --size --sha1 --hexdump packed.hg
334 $ f -B 64 --size --sha1 --hexdump packed.hg
335 packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
335 packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
336 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
336 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
337 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
337 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
338 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
338 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
339 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
339 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
340 $ hg debugbundle --spec packed.hg
340 $ hg debugbundle --spec packed.hg
341 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
341 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
342 #endif
342 #endif
343
343
344 #if reporevlogstore
344 #if reporevlogstore
345
345
346 generaldelta requirement is not listed in stream clone bundles unless used
346 generaldelta requirement is not listed in stream clone bundles unless used
347
347
348 $ hg --config format.usegeneraldelta=false init testnongd
348 $ hg --config format.usegeneraldelta=false init testnongd
349 $ cd testnongd
349 $ cd testnongd
350 $ touch foo
350 $ touch foo
351 $ hg -q commit -A -m initial
351 $ hg -q commit -A -m initial
352 $ cd ..
352 $ cd ..
353
353
354 #endif
354 #endif
355
355
356 #if reporevlogstore rust
356 #if reporevlogstore rust
357
357
358 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
358 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
359 writing 301 bytes for 3 files
359 writing 301 bytes for 3 files
360 bundle requirements: revlog-compression-zstd, revlogv1
360 bundle requirements: revlog-compression-zstd, revlogv1
361
361
362 $ f -B 64 --size --sha1 --hexdump packednongd.hg
362 $ f -B 64 --size --sha1 --hexdump packednongd.hg
363 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
363 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
364 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
364 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
365 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
365 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
366 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
366 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
367 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
367 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
368
368
369 $ hg debugbundle --spec packednongd.hg
369 $ hg debugbundle --spec packednongd.hg
370 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
370 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
371
371
372 #endif
372 #endif
373
373
374 #if reporevlogstore no-rust zstd
374 #if reporevlogstore no-rust zstd
375
375
376 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
376 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
377 writing 301 bytes for 3 files
377 writing 301 bytes for 3 files
378 bundle requirements: revlog-compression-zstd, revlogv1
378 bundle requirements: revlog-compression-zstd, revlogv1
379
379
380 $ f -B 64 --size --sha1 --hexdump packednongd.hg
380 $ f -B 64 --size --sha1 --hexdump packednongd.hg
381 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
381 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
382 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
382 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
383 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
383 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
384 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
384 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
385 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
385 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
386
386
387 $ hg debugbundle --spec packednongd.hg
387 $ hg debugbundle --spec packednongd.hg
388 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
388 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
389
389
390
390
391 #endif
391 #endif
392
392
393 #if reporevlogstore no-rust no-zstd
393 #if reporevlogstore no-rust no-zstd
394
394
395 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
395 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
396 writing 301 bytes for 3 files
396 writing 301 bytes for 3 files
397 bundle requirements: revlogv1
397 bundle requirements: revlogv1
398
398
399 $ f -B 64 --size --sha1 --hexdump packednongd.hg
399 $ f -B 64 --size --sha1 --hexdump packednongd.hg
400 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
400 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
401 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
401 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
402 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
402 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
403 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
403 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
404 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
404 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
405
405
406 $ hg debugbundle --spec packednongd.hg
406 $ hg debugbundle --spec packednongd.hg
407 none-packed1;requirements%3Drevlogv1
407 none-packed1;requirements%3Drevlogv1
408
408
409
409
410 #endif
410 #endif
411
411
412 #if reporevlogstore
412 #if reporevlogstore
413
413
414 Warning emitted when packed bundles contain secret changesets
414 Warning emitted when packed bundles contain secret changesets
415
415
416 $ hg init testsecret
416 $ hg init testsecret
417 $ cd testsecret
417 $ cd testsecret
418 $ touch foo
418 $ touch foo
419 $ hg -q commit -A -m initial
419 $ hg -q commit -A -m initial
420 $ hg phase --force --secret -r .
420 $ hg phase --force --secret -r .
421 $ cd ..
421 $ cd ..
422
422
423 #endif
423 #endif
424
424
425 #if reporevlogstore rust
425 #if reporevlogstore rust
426
426
427 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
427 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
428 (warning: stream clone bundle will contain secret revisions)
428 (warning: stream clone bundle will contain secret revisions)
429 writing 301 bytes for 3 files
429 writing 301 bytes for 3 files
430 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
430 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
431
431
432 #endif
432 #endif
433
433
434 #if reporevlogstore no-rust zstd
434 #if reporevlogstore no-rust zstd
435
435
436 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
436 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
437 (warning: stream clone bundle will contain secret revisions)
437 (warning: stream clone bundle will contain secret revisions)
438 writing 301 bytes for 3 files
438 writing 301 bytes for 3 files
439 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
439 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
440
440
441 #endif
441 #endif
442
442
443 #if reporevlogstore no-rust no-zstd
443 #if reporevlogstore no-rust no-zstd
444
444
445 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
445 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
446 (warning: stream clone bundle will contain secret revisions)
446 (warning: stream clone bundle will contain secret revisions)
447 writing 301 bytes for 3 files
447 writing 301 bytes for 3 files
448 bundle requirements: generaldelta, revlogv1, sparserevlog
448 bundle requirements: generaldelta, revlogv1, sparserevlog
449
449
450 #endif
450 #endif
451
451
452 #if reporevlogstore
452 #if reporevlogstore
453
453
454 Unpacking packed1 bundles with "hg unbundle" isn't allowed
454 Unpacking packed1 bundles with "hg unbundle" isn't allowed
455
455
456 $ hg init packed
456 $ hg init packed
457 $ hg -R packed unbundle packed.hg
457 $ hg -R packed unbundle packed.hg
458 abort: packed bundles cannot be applied with "hg unbundle"
458 abort: packed bundles cannot be applied with "hg unbundle"
459 (use "hg debugapplystreamclonebundle")
459 (use "hg debugapplystreamclonebundle")
460 [10]
460 [10]
461
461
462 packed1 can be consumed by a debug command
462 packed1 can be consumed by a debug command
463
463
464 (this also confirms that streamclone-ed changes are visible via
464 (this also confirms that streamclone-ed changes are visible via
465 @filecache properties to in-process procedures before closing
465 @filecache properties to in-process procedures before closing
466 transaction)
466 transaction)
467
467
468 $ cat > $TESTTMP/showtip.py <<EOF
468 $ cat > $TESTTMP/showtip.py <<EOF
469 >
469 >
470 > def showtip(ui, repo, hooktype, **kwargs):
470 > def showtip(ui, repo, hooktype, **kwargs):
471 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
471 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
472 >
472 >
473 > def reposetup(ui, repo):
473 > def reposetup(ui, repo):
474 > # this confirms (and ensures) that (empty) 00changelog.i
474 > # this confirms (and ensures) that (empty) 00changelog.i
475 > # before streamclone is already cached as repo.changelog
475 > # before streamclone is already cached as repo.changelog
476 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
476 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
477 >
477 >
478 > # this confirms that streamclone-ed changes are visible to
478 > # this confirms that streamclone-ed changes are visible to
479 > # in-process procedures before closing transaction
479 > # in-process procedures before closing transaction
480 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
480 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
481 >
481 >
482 > # this confirms that streamclone-ed changes are still visible
482 > # this confirms that streamclone-ed changes are still visible
483 > # after closing transaction
483 > # after closing transaction
484 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
484 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
485 > EOF
485 > EOF
486 $ cat >> $HGRCPATH <<EOF
486 $ cat >> $HGRCPATH <<EOF
487 > [extensions]
487 > [extensions]
488 > showtip = $TESTTMP/showtip.py
488 > showtip = $TESTTMP/showtip.py
489 > EOF
489 > EOF
490
490
491 $ hg -R packed debugapplystreamclonebundle packed.hg
491 $ hg -R packed debugapplystreamclonebundle packed.hg
492 6 files to transfer, 2.60 KB of data
492 6 files to transfer, 2.60 KB of data
493 pretxnopen: 000000000000
493 pretxnopen: 000000000000
494 pretxnclose: aa35859c02ea
494 pretxnclose: aa35859c02ea
495 transferred 2.60 KB in * seconds (* */sec) (glob)
495 transferred 2.60 KB in * seconds (* */sec) (glob)
496 txnclose: aa35859c02ea
496 txnclose: aa35859c02ea
497
497
498 (for safety, confirm visibility of streamclone-ed changes by another
498 (for safety, confirm visibility of streamclone-ed changes by another
499 process, too)
499 process, too)
500
500
501 $ hg -R packed tip -T "{node|short}\n"
501 $ hg -R packed tip -T "{node|short}\n"
502 aa35859c02ea
502 aa35859c02ea
503
503
504 $ cat >> $HGRCPATH <<EOF
504 $ cat >> $HGRCPATH <<EOF
505 > [extensions]
505 > [extensions]
506 > showtip = !
506 > showtip = !
507 > EOF
507 > EOF
508
508
509 Does not work on non-empty repo
509 Does not work on non-empty repo
510
510
511 $ hg -R packed debugapplystreamclonebundle packed.hg
511 $ hg -R packed debugapplystreamclonebundle packed.hg
512 abort: cannot apply stream clone bundle on non-empty repo
512 abort: cannot apply stream clone bundle on non-empty repo
513 [255]
513 [255]
514
514
515 #endif
515 #endif
516
516
517 Create partial clones
517 Create partial clones
518
518
519 $ rm -r empty
519 $ rm -r empty
520 $ hg init empty
520 $ hg init empty
521 $ hg clone -r 3 test partial
521 $ hg clone -r 3 test partial
522 adding changesets
522 adding changesets
523 adding manifests
523 adding manifests
524 adding file changes
524 adding file changes
525 added 4 changesets with 4 changes to 1 files
525 added 4 changesets with 4 changes to 1 files
526 new changesets f9ee2f85a263:eebf5a27f8ca
526 new changesets f9ee2f85a263:eebf5a27f8ca
527 updating to branch default
527 updating to branch default
528 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
528 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
529 $ hg clone partial partial2
529 $ hg clone partial partial2
530 updating to branch default
530 updating to branch default
531 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
531 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
532 $ cd partial
532 $ cd partial
533
533
534 #if repobundlerepo
534 #if repobundlerepo
535
535
536 Log -R full.hg in partial
536 Log -R full.hg in partial
537
537
538 $ hg -R bundle://../full.hg log -T phases
538 $ hg -R bundle://../full.hg log -T phases
539 changeset: 8:aa35859c02ea
539 changeset: 8:aa35859c02ea
540 tag: tip
540 tag: tip
541 phase: draft
541 phase: draft
542 parent: 3:eebf5a27f8ca
542 parent: 3:eebf5a27f8ca
543 user: test
543 user: test
544 date: Thu Jan 01 00:00:00 1970 +0000
544 date: Thu Jan 01 00:00:00 1970 +0000
545 summary: 0.3m
545 summary: 0.3m
546
546
547 changeset: 7:a6a34bfa0076
547 changeset: 7:a6a34bfa0076
548 phase: draft
548 phase: draft
549 user: test
549 user: test
550 date: Thu Jan 01 00:00:00 1970 +0000
550 date: Thu Jan 01 00:00:00 1970 +0000
551 summary: 1.3m
551 summary: 1.3m
552
552
553 changeset: 6:7373c1169842
553 changeset: 6:7373c1169842
554 phase: draft
554 phase: draft
555 user: test
555 user: test
556 date: Thu Jan 01 00:00:00 1970 +0000
556 date: Thu Jan 01 00:00:00 1970 +0000
557 summary: 1.3
557 summary: 1.3
558
558
559 changeset: 5:1bb50a9436a7
559 changeset: 5:1bb50a9436a7
560 phase: draft
560 phase: draft
561 user: test
561 user: test
562 date: Thu Jan 01 00:00:00 1970 +0000
562 date: Thu Jan 01 00:00:00 1970 +0000
563 summary: 1.2
563 summary: 1.2
564
564
565 changeset: 4:095197eb4973
565 changeset: 4:095197eb4973
566 phase: draft
566 phase: draft
567 parent: 0:f9ee2f85a263
567 parent: 0:f9ee2f85a263
568 user: test
568 user: test
569 date: Thu Jan 01 00:00:00 1970 +0000
569 date: Thu Jan 01 00:00:00 1970 +0000
570 summary: 1.1
570 summary: 1.1
571
571
572 changeset: 3:eebf5a27f8ca
572 changeset: 3:eebf5a27f8ca
573 phase: public
573 phase: public
574 user: test
574 user: test
575 date: Thu Jan 01 00:00:00 1970 +0000
575 date: Thu Jan 01 00:00:00 1970 +0000
576 summary: 0.3
576 summary: 0.3
577
577
578 changeset: 2:e38ba6f5b7e0
578 changeset: 2:e38ba6f5b7e0
579 phase: public
579 phase: public
580 user: test
580 user: test
581 date: Thu Jan 01 00:00:00 1970 +0000
581 date: Thu Jan 01 00:00:00 1970 +0000
582 summary: 0.2
582 summary: 0.2
583
583
584 changeset: 1:34c2bf6b0626
584 changeset: 1:34c2bf6b0626
585 phase: public
585 phase: public
586 user: test
586 user: test
587 date: Thu Jan 01 00:00:00 1970 +0000
587 date: Thu Jan 01 00:00:00 1970 +0000
588 summary: 0.1
588 summary: 0.1
589
589
590 changeset: 0:f9ee2f85a263
590 changeset: 0:f9ee2f85a263
591 phase: public
591 phase: public
592 user: test
592 user: test
593 date: Thu Jan 01 00:00:00 1970 +0000
593 date: Thu Jan 01 00:00:00 1970 +0000
594 summary: 0.0
594 summary: 0.0
595
595
596
596
597 Incoming full.hg in partial
597 Incoming full.hg in partial
598
598
599 $ hg incoming bundle://../full.hg
599 $ hg incoming bundle://../full.hg
600 comparing with bundle:../full.hg
600 comparing with bundle:../full.hg
601 searching for changes
601 searching for changes
602 changeset: 4:095197eb4973
602 changeset: 4:095197eb4973
603 parent: 0:f9ee2f85a263
603 parent: 0:f9ee2f85a263
604 user: test
604 user: test
605 date: Thu Jan 01 00:00:00 1970 +0000
605 date: Thu Jan 01 00:00:00 1970 +0000
606 summary: 1.1
606 summary: 1.1
607
607
608 changeset: 5:1bb50a9436a7
608 changeset: 5:1bb50a9436a7
609 user: test
609 user: test
610 date: Thu Jan 01 00:00:00 1970 +0000
610 date: Thu Jan 01 00:00:00 1970 +0000
611 summary: 1.2
611 summary: 1.2
612
612
613 changeset: 6:7373c1169842
613 changeset: 6:7373c1169842
614 user: test
614 user: test
615 date: Thu Jan 01 00:00:00 1970 +0000
615 date: Thu Jan 01 00:00:00 1970 +0000
616 summary: 1.3
616 summary: 1.3
617
617
618 changeset: 7:a6a34bfa0076
618 changeset: 7:a6a34bfa0076
619 user: test
619 user: test
620 date: Thu Jan 01 00:00:00 1970 +0000
620 date: Thu Jan 01 00:00:00 1970 +0000
621 summary: 1.3m
621 summary: 1.3m
622
622
623 changeset: 8:aa35859c02ea
623 changeset: 8:aa35859c02ea
624 tag: tip
624 tag: tip
625 parent: 3:eebf5a27f8ca
625 parent: 3:eebf5a27f8ca
626 user: test
626 user: test
627 date: Thu Jan 01 00:00:00 1970 +0000
627 date: Thu Jan 01 00:00:00 1970 +0000
628 summary: 0.3m
628 summary: 0.3m
629
629
630
630
631 Outgoing -R full.hg vs partial2 in partial
631 Outgoing -R full.hg vs partial2 in partial
632
632
633 $ hg -R bundle://../full.hg outgoing ../partial2
633 $ hg -R bundle://../full.hg outgoing ../partial2
634 comparing with ../partial2
634 comparing with ../partial2
635 searching for changes
635 searching for changes
636 changeset: 4:095197eb4973
636 changeset: 4:095197eb4973
637 parent: 0:f9ee2f85a263
637 parent: 0:f9ee2f85a263
638 user: test
638 user: test
639 date: Thu Jan 01 00:00:00 1970 +0000
639 date: Thu Jan 01 00:00:00 1970 +0000
640 summary: 1.1
640 summary: 1.1
641
641
642 changeset: 5:1bb50a9436a7
642 changeset: 5:1bb50a9436a7
643 user: test
643 user: test
644 date: Thu Jan 01 00:00:00 1970 +0000
644 date: Thu Jan 01 00:00:00 1970 +0000
645 summary: 1.2
645 summary: 1.2
646
646
647 changeset: 6:7373c1169842
647 changeset: 6:7373c1169842
648 user: test
648 user: test
649 date: Thu Jan 01 00:00:00 1970 +0000
649 date: Thu Jan 01 00:00:00 1970 +0000
650 summary: 1.3
650 summary: 1.3
651
651
652 changeset: 7:a6a34bfa0076
652 changeset: 7:a6a34bfa0076
653 user: test
653 user: test
654 date: Thu Jan 01 00:00:00 1970 +0000
654 date: Thu Jan 01 00:00:00 1970 +0000
655 summary: 1.3m
655 summary: 1.3m
656
656
657 changeset: 8:aa35859c02ea
657 changeset: 8:aa35859c02ea
658 tag: tip
658 tag: tip
659 parent: 3:eebf5a27f8ca
659 parent: 3:eebf5a27f8ca
660 user: test
660 user: test
661 date: Thu Jan 01 00:00:00 1970 +0000
661 date: Thu Jan 01 00:00:00 1970 +0000
662 summary: 0.3m
662 summary: 0.3m
663
663
664
664
665 Outgoing -R does-not-exist.hg vs partial2 in partial
665 Outgoing -R does-not-exist.hg vs partial2 in partial
666
666
667 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
667 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
668 abort: *../does-not-exist.hg* (glob)
668 abort: *../does-not-exist.hg* (glob)
669 [255]
669 [255]
670
670
671 #endif
671 #endif
672
672
673 $ cd ..
673 $ cd ..
674
674
675 hide outer repo
675 hide outer repo
676 $ hg init
676 $ hg init
677
677
678 Direct clone from bundle (all-history)
678 Direct clone from bundle (all-history)
679
679
680 #if repobundlerepo
680 #if repobundlerepo
681
681
682 $ hg clone full.hg full-clone
682 $ hg clone full.hg full-clone
683 requesting all changes
683 requesting all changes
684 adding changesets
684 adding changesets
685 adding manifests
685 adding manifests
686 adding file changes
686 adding file changes
687 added 9 changesets with 7 changes to 4 files (+1 heads)
687 added 9 changesets with 7 changes to 4 files (+1 heads)
688 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
688 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
689 updating to branch default
689 updating to branch default
690 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
690 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
691 $ hg -R full-clone heads
691 $ hg -R full-clone heads
692 changeset: 8:aa35859c02ea
692 changeset: 8:aa35859c02ea
693 tag: tip
693 tag: tip
694 parent: 3:eebf5a27f8ca
694 parent: 3:eebf5a27f8ca
695 user: test
695 user: test
696 date: Thu Jan 01 00:00:00 1970 +0000
696 date: Thu Jan 01 00:00:00 1970 +0000
697 summary: 0.3m
697 summary: 0.3m
698
698
699 changeset: 7:a6a34bfa0076
699 changeset: 7:a6a34bfa0076
700 user: test
700 user: test
701 date: Thu Jan 01 00:00:00 1970 +0000
701 date: Thu Jan 01 00:00:00 1970 +0000
702 summary: 1.3m
702 summary: 1.3m
703
703
704 $ rm -r full-clone
704 $ rm -r full-clone
705
705
When cloning from a non-copiable repository into '', do not
recurse infinitely (issue2528)

$ hg clone full.hg ''
abort: empty destination path is not valid
[10]

test for https://bz.mercurial-scm.org/216

Unbundle incremental bundles into a fresh, empty repository in one go

$ rm -r empty
$ hg init empty
$ hg -R test bundle --base null -r 0 ../0.hg
1 changesets found
$ hg -R test bundle --exact -r 1 ../1.hg
1 changesets found
$ hg -R empty unbundle -u ../0.hg ../1.hg
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets f9ee2f85a263 (1 drafts)
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 34c2bf6b0626 (1 drafts)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

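The two apply passes above come from a single command: hg unbundle accepts several bundle files and applies them left to right, so a bundle must follow the bundles that contain its ancestors. A minimal sketch of the same pattern against a hypothetical repository named fresh (output omitted):

    $ hg init fresh                            # hypothetical
    $ hg -R fresh unbundle -u ../0.hg ../1.hg  # ancestors first; -u updates when done
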
View full contents of the bundle
$ hg -R test bundle --base null -r 3 ../partial.hg
4 changesets found
$ cd test
$ hg -R ../../partial.hg log -r "bundle()"
changeset: 0:f9ee2f85a263
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 0.0

changeset: 1:34c2bf6b0626
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 0.1

changeset: 2:e38ba6f5b7e0
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 0.2

changeset: 3:eebf5a27f8ca
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 0.3

$ cd ..

#endif
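
The bundle() revset used above is available whenever a bundle is opened as a repository via -R; it selects exactly the changesets supplied by the bundle, as opposed to those already present in the underlying store. A hypothetical variation with a compact template (output omitted):

    $ hg -R ../partial.hg log -r "bundle()" --template "{rev}:{node|short}\n"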

test for 540d1059c802

$ hg init orig
$ cd orig
$ echo foo > foo
$ hg add foo
$ hg ci -m 'add foo'

$ hg clone . ../copy
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg tag foo

$ cd ../copy
$ echo >> foo
$ hg ci -m 'change foo'
$ hg bundle ../bundle.hg ../orig
searching for changes
1 changesets found

$ cd ..

#if repobundlerepo
$ cd orig
$ hg incoming ../bundle.hg
comparing with ../bundle.hg
searching for changes
changeset: 2:ed1b79f46b9a
tag: tip
parent: 0:bbd179dfa0a7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: change foo

$ cd ..

test bundle with # in the filename (issue2154):

$ cp bundle.hg 'test#bundle.hg'
$ cd orig
$ hg incoming '../test#bundle.hg'
comparing with ../test
abort: unknown revision 'bundle.hg'
[10]

note that percent encoding is not handled:

$ hg incoming ../test%23bundle.hg
abort: repository ../test%23bundle.hg not found
[255]
$ cd ..

#endif

test bundling revisions on a newly created branch (issue3828):

$ hg -q clone -U test test-clone
$ cd test

$ hg -q branch foo
$ hg commit -m "create foo branch"
$ hg -q outgoing ../test-clone
9:b4f5acb1ee27
$ hg -q bundle --branch foo foo.hg ../test-clone
#if repobundlerepo
$ hg -R foo.hg -q log -r "bundle()"
9:b4f5acb1ee27
#endif

$ cd ..

test for https://bz.mercurial-scm.org/1144

test that verifying a bundle does not traceback

partial history bundle, fails with unknown parent

$ hg -R bundle.hg verify
abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
[50]

full history bundle, refuses to verify non-local repo

#if repobundlerepo
$ hg -R all.hg verify
abort: cannot verify bundle or remote repos
[255]
#endif

but regular verify must continue to work

$ hg -R orig verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 2 changesets with 2 changes to 2 files

#if repobundlerepo
diff against bundle

$ hg init b
$ cd b
$ hg -R ../all.hg diff -r tip
diff -r aa35859c02ea anotherfile
--- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-0
-1
-2
-3
$ cd ..
#endif

bundle single branch

$ hg init branchy
$ cd branchy
$ echo a >a
$ echo x >x
$ hg ci -Ama
adding a
adding x
$ echo c >c
$ echo xx >x
$ hg ci -Amc
adding c
$ echo c1 >c1
$ hg ci -Amc1
adding c1
$ hg up 0
1 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ echo b >b
$ hg ci -Amb
adding b
created new head
$ echo b1 >b1
$ echo xx >x
$ hg ci -Amb1
adding b1
$ hg clone -q -r2 . part

== bundling via incoming

$ hg in -R part --bundle incoming.hg --template "{node}\n" .
comparing with .
searching for changes
1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
057f4db07f61970e1c11e83be79e9d08adc4dc31

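Besides listing the incoming changesets, --bundle incoming.hg saves them to that file, so they can be applied later without contacting the source repository again. A sketch of such a follow-up (output omitted):

    $ hg -R part pull incoming.hg  # apply the changesets saved by the incoming run
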
== bundling

$ hg bundle bundle.hg part --debug --config progress.debug=true
query 1; heads
searching for changes
all remote heads known locally
2 changesets found
list of changesets:
1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
057f4db07f61970e1c11e83be79e9d08adc4dc31
bundle2-output-bundle: "HG20", (1 params) 2 parts total
bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
changesets: 1/2 chunks (50.00%)
changesets: 2/2 chunks (100.00%)
manifests: 1/2 chunks (50.00%)
manifests: 2/2 chunks (100.00%)
files: b 1/3 files (33.33%)
files: b1 2/3 files (66.67%)
files: x 3/3 files (100.00%)
bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload

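The progress.debug=true override routes each progress step into the debug output instead of the transient progress bar, which is what makes the chunk-by-chunk lines above visible. The equivalent persistent form in an hgrc:

    [progress]
    debug = true
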
#if repobundlerepo
== Test for issue3441

$ hg clone -q -r0 . part2
$ hg -q -R part2 pull bundle.hg
$ hg -R part2 verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 3 changesets with 5 changes to 4 files
#endif

== Test bundling no commits

$ hg bundle -r 'public()' no-output.hg
abort: no commits to bundle
[10]

$ cd ..

When the user merges to a revision that exists only in the bundle,
it should warn that the second parent of the working
directory does not exist

$ hg init update2bundled
$ cd update2bundled
$ cat <<EOF >> .hg/hgrc
> [extensions]
> strip =
> EOF
$ echo "aaa" >> a
$ hg commit -A -m 0
adding a
$ echo "bbb" >> b
$ hg commit -A -m 1
adding b
$ echo "ccc" >> c
$ hg commit -A -m 2
adding c
$ hg update -r 1
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ echo "ddd" >> d
$ hg commit -A -m 3
adding d
created new head
$ hg update -r 2
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg log -G
o changeset: 3:8bd3e1f196af
| tag: tip
| parent: 1:a01eca7af26d
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: 3
|
| @ changeset: 2:4652c276ac4f
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: 2
|
o changeset: 1:a01eca7af26d
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: 1
|
o changeset: 0:4fe08cd4693e
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 0


#if repobundlerepo
$ hg bundle --base 1 -r 3 ../update2bundled.hg
1 changesets found
$ hg strip -r 3
saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
$ hg merge -R ../update2bundled.hg -r 3
setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)

When the user updates to a revision that exists only in the bundle,
it should show a warning

$ hg update -R ../update2bundled.hg --clean -r 3
setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1 files updated, 0 files merged, 1 files removed, 0 files unresolved

When the user updates to a revision that exists in the local repository,
the warning shouldn't be emitted

$ hg update -R ../update2bundled.hg -r 0
0 files updated, 0 files merged, 2 files removed, 0 files unresolved
#endif

Test the option that creates a slim bundle

$ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
3 changesets found

Test the option that creates a full, no-delta bundle
$ hg bundle -a --config devel.bundle.delta=full ./full.hg
3 changesets found

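The two bundles differ only in delta policy: devel.bundle.delta=p1 stores each revision as a delta against its first parent (the "slim" variant), while devel.bundle.delta=full stores every revision as a full snapshot. As with any --config override, the same setting can live in an hgrc:

    [devel]
    bundle.delta = p1
    # or: bundle.delta = full
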

Test the debug statistics when building a bundle
-----------------------------------------------

$ hg bundle -a ./default.hg --config debug.bundling-stats=yes
3 changesets found
DEBUG-BUNDLING: revisions: 9
DEBUG-BUNDLING: changelog: 3
DEBUG-BUNDLING: manifest: 3
DEBUG-BUNDLING: files: 3 (for 3 revlogs)
DEBUG-BUNDLING: deltas:
DEBUG-BUNDLING: from-storage: 2 (100% of available 2)
DEBUG-BUNDLING: computed: 7
DEBUG-BUNDLING: full: 7 (100% of native 7)
DEBUG-BUNDLING: changelog: 3 (100% of native 3)
DEBUG-BUNDLING: manifests: 1 (100% of native 1)
DEBUG-BUNDLING: files: 3 (100% of native 3)

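In the statistics above, from-storage counts deltas reused directly from the local revlogs, while computed counts deltas generated for the bundle, broken down per revlog type. To keep these statistics on for every bundling operation, the one-off override maps to:

    [debug]
    bundling-stats = yes
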
Test the debug output when applying delta
-----------------------------------------

$ hg init foo
$ hg -R foo unbundle ./slim.hg \
> --config debug.revlog.debug-delta=yes \
> --config storage.revlog.reuse-external-delta=no \
> --config storage.revlog.reuse-external-delta-parent=no
adding changesets
DBG-DELTAS: CHANGELOG: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
DBG-DELTAS: CHANGELOG: rev=1: delta-base=1 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
DBG-DELTAS: CHANGELOG: rev=2: delta-base=2 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
adding manifests
DBG-DELTAS: MANIFESTLOG: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
DBG-DELTAS: MANIFESTLOG: rev=1: delta-base=0 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
adding file changes
DBG-DELTAS: FILELOG:a: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
DBG-DELTAS: FILELOG:b: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
DBG-DELTAS: FILELOG:c: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
added 3 changesets with 3 changes to 3 files
new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
(run 'hg update' to get a working copy)

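Here the storage.revlog.reuse-external-delta* settings stop the receiving repository from blindly reusing the deltas shipped in the bundle, so every incoming revision goes through delta processing and emits a DBG-DELTAS line (is-cached=1 flags revisions where the incoming delta was still the one chosen). The hgrc equivalent of the three overrides:

    [debug]
    revlog.debug-delta = yes
    [storage]
    revlog.reuse-external-delta = no
    revlog.reuse-external-delta-parent = no
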

Test the debug statistics when applying a bundle
-----------------------------------------------

$ hg init bar
$ hg -R bar unbundle ./default.hg --config debug.unbundling-stats=yes
adding changesets
adding manifests
adding file changes
DEBUG-UNBUNDLING: revisions: 9
DEBUG-UNBUNDLING: changelog: 3 ( 33%)
DEBUG-UNBUNDLING: manifests: 3 ( 33%)
DEBUG-UNBUNDLING: files: 3 ( 33%)
DEBUG-UNBUNDLING: total-time: ?????????????? seconds (glob)
DEBUG-UNBUNDLING: changelog: ?????????????? seconds (???%) (glob)
DEBUG-UNBUNDLING: manifests: ?????????????? seconds (???%) (glob)
DEBUG-UNBUNDLING: files: ?????????????? seconds (???%) (glob)
DEBUG-UNBUNDLING: type-count:
DEBUG-UNBUNDLING: changelog:
DEBUG-UNBUNDLING: full: 3
DEBUG-UNBUNDLING: cached: 0 ( 0%)
DEBUG-UNBUNDLING: manifests:
DEBUG-UNBUNDLING: full: 1
DEBUG-UNBUNDLING: cached: 0 ( 0%)
DEBUG-UNBUNDLING: delta: 2
DEBUG-UNBUNDLING: cached: 2 (100%)
DEBUG-UNBUNDLING: files:
DEBUG-UNBUNDLING: full: 3
DEBUG-UNBUNDLING: cached: 0 ( 0%)
DEBUG-UNBUNDLING: type-time:
DEBUG-UNBUNDLING: changelog:
DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: manifests:
DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: delta: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: files:
DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
added 3 changesets with 3 changes to 3 files
new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
(run 'hg update' to get a working copy)
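
On lines marked (glob), each ? matches exactly one arbitrary character, so the fixed-width timing fields can vary from run to run. Like its bundling counterpart, the statistic can be enabled persistently instead of per command:

    [debug]
    unbundling-stats = yes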