##// END OF EJS Templates
revlog: remove legacy usage of `_withsparseread`...
marmoute -
r51952:47d43efd default
parent child Browse files
Show More
@@ -1,4617 +1,4623 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
# Build a `revlog` constructor shim: modern Mercurial requires a
# "revlog kind" argument (mercurial.revlogutils.constants), while older
# versions do not.  Pick the right call signature at import time.
try:
    from mercurial.revlogutils import constants as revlog_constants

    # synthetic kind tag identifying revlogs created by this extension
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog, injecting the perf-specific kind tag."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # pre-kind Mercurial: no constants module / no KIND_OTHER attribute
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog with the historical (kind-less) signature."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for pycompat helpers (byteskwargs,
    fsencode, ...) when running under a Mercurial too old to have them.
    """
    return a
140
140
141
141
# for "historical portability":
# alias the pycompat helpers this extension needs; fall back to plain
# Python 2 equivalents when pycompat (or a given helper) is missing.
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # very old Mercurial: assume Python 2 semantics throughout
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
165
165
# for "historical portability":
# locate a usable Queue class; pycompat.queue changed shape across
# releases, and very old versions need the Python 2 stdlib module.
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # no pycompat at all: Python 2 stdlib fallback
        import Queue as queue
175
175
# for "historical portability":
# the log templater factory moved from cmdutil to logcmdutil; try the
# modern location first, then the old one, else mark it unavailable.
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
185
185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "attribute missing"


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr*.

    ``attr`` is passed through ``_sysstr`` first, so a bytes name is
    accepted on Python 3.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
197
197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on Python 3, os.name is a str, so it never equals the
    # bytes literal b'nt'; this branch only fires on Python 2 (where
    # time.clock still exists — it was removed in Python 3.8).  On Python 3
    # the perf_counter branch above is always taken anyway (since 3.3).
    util.timer = time.clock
else:
    util.timer = time.time
207
207
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# (checked on cmdutil first, then commands, then an empty list)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
216
216
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# The local fallback mirrors the upstream option table.
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
235
235
# command table populated by the @command decorator below
cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its names."""
    return cmd.split(b"|")
245
245
# Select an @command decorator implementation matching the running
# Mercurial: prefer registrar.command, then cmdutil.command (wrapped to
# emulate the `norepo` keyword when absent), else a local re-creation.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
# Register the perf.* config options with the config registrar when one
# exists.  Older registrars raise TypeError on the `experimental`
# keyword; in that case re-register every option without it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar/configitems unavailable: run without declared options
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem rejected the `experimental` keyword; redo without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365
365
366
366
def getlen(ui):
    """Return a length function honoring the perf.stub test mode.

    When ``perf.stub`` is set, every collection is reported as having a
    single element so benchmarks finish immediately; otherwise the
    builtin ``len`` is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
371
371
372
372
class noop:
    """Context manager that does nothing on entry or exit.

    Exceptions propagate normally, since ``__exit__`` returns a falsy
    value.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared do-nothing context instance, used when no profiler is active
NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.

    The timer is a partial application of ``_timer`` (or ``stub_timer``
    when the experimental ``perf.stub`` option is set), pre-bound with the
    formatter and with the display/limit/pre-run/profiler settings read
    from the ``perf`` config section.
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # falsy, like a plain (non-templated) formatter
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # each entry looks like b'<seconds>-<runs>'; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for ``_timer`` used in perf.stub mode.

    Runs ``setup`` (when provided) and then ``func`` exactly once, with
    no timing, looping, or reporting.  ``fm`` and ``title`` are accepted
    only for signature compatibility with ``_timer``.
    """
    if setup is not None:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) timing tuple.

    The measured interval covers the body of the ``with`` statement;
    the tuple is appended only after the block finishes, so read it
    once the ``with`` has exited.
    """
    measured = []
    os_before = os.times()
    clock_before = util.timer()
    yield measured
    clock_after = util.timer()
    os_after = os.times()
    measured.append(
        (
            clock_after - clock_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
526
526
527
527
# list of stop condition (elapsed time, minimal run count)
# A benchmark stops once any pair is satisfied: it has run for at least
# <elapsed> seconds AND completed at least <count> iterations.  Pairs are
# checked in order after every iteration (see _timer).
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Context manager performing no setup and no teardown.

    Default value for ``_timer``'s ``context`` parameter.
    """
    yield
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report the timings through formatter *fm*.

    ``setup`` (if given) runs before every iteration, outside the timed
    region; ``context`` wraps each call.  ``prerun`` warm-up iterations
    are executed first without being measured.  Measured iterations
    continue until one of the ``limits`` pairs (elapsed-seconds,
    minimum-run-count) is satisfied.  Only the first measured iteration
    runs under ``profiler``.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # unmeasured warm-up runs
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # profile only the first measured run
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    ``timings`` is a list of (wall, user, sys) tuples; it is sorted in
    place.  The best run is always shown; with ``displayall`` the max,
    average and median are shown as well.  ``title`` and ``result`` are
    emitted first when provided.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, timing):
        # every non-"best" role gets a "role." field-name prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', timing[0])
        fm.write(prefix + b'comb', b' comb %f', timing[1] + timing[2])
        fm.write(prefix + b'user', b' user %f', timing[1])
        fm.write(prefix + b'sys', b' sys %f', timing[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        emit(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        emit(b'median', timings[count // 2])
617
617
# utilities for historical portability

def getint(ui, section, name, default):
    """Read config option `section.name` as an integer, with `default`.

    Kept instead of `ui.configint` for "historical portability":
    ui.configint has been available since 1.9 (or fa2b596db182), but this
    extension must run against much older Mercurial versions too.

    Raises error.ConfigError when the configured value is not an integer.
    """
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
671
671
672
672
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    """Return the `subsettable` mapping wherever this Mercurial defines it.

    For "historical portability", `subsettable` is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same vfs as `sopener`.
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same vfs as `opener`.
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Drop the `@filecache`d property `attrname` from `obj`.

    Works on both repo objects (using the unfiltered view, where the file
    caches actually live) and arbitrary objects with a `_filecache` dict.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the bookkeeping entry so the property is recomputed
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Force the changelog of `repo` to be reloaded from disk on next use."""
    if repo is not repo.unfiltered():
        # filtered repos keep their own changelog cache keyed by _clcachekey
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
765
765
766
766
# perf commands

@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory through the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file `f` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy work is actually measured
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # recent dirstate API requires wrapping status in this context
            with dirstate.running_status(repo):
                timer(status_dirstate)
            dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # the scmutil.addremove signature gained `uipathfn` at some point;
        # inspect it to stay compatible with both shapes
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    """Clear the in-memory caches of changelog/revlog `cl`.

    Behaves somewhat consistently across internal API changes.
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # drop changelog caches so every run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    """Fallback used when `tags.clear_cache_on_disk` does not exist:
    remove the on-disk tags cache file for this repo view."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback used when `tags.clear_cache_fnodes` does not exist:
    remove the on-disk hgtagsfnodes cache file."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # 0xff-filled record marks an entry as missing/invalid in the cache
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer the modern `tags` module helpers, falling back to the
    # compatibility implementations defined above
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    clear_fnodes_fn = getattr(
        tags,
        "clear_cache_fnodes",
        _default_clear_on_disk_tags_fnodes_cache,
    )
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        # BUG FIX: was `clear_revs.extends(...)`; list has no `extends`
        # method, so --clear-fnode-cache-rev raised AttributeError.
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        # warm a cache for the repo *without* the last N revisions, so each
        # run can start from that state via a simple file copy in s()
        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1029
1029
1030
1030
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the lazy ancestor iterator
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
1043
1043
1044
1044
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of `revset` against the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test drives the lazy ancestor computation
            rev in s

    timer(d)
    fm.end()
1059
1059
1060
1060
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # single argument form: REV only (use -c/-m to pick the revlog);
    # two argument form: FILE REV
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that the delta search would receive for `rev`
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1124
1124
1125
1125
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve `path` with whichever API this Mercurial version provides,
    # from newest to oldest
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # recreate the peer before each run so connection setup is not timed
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1152
1152
1153
1153
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached property so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1178
1178
1179
1179
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved between modules over time; support both homes
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed message typo: was b"not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        # derive the changegroup version from the bundle format version
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we only want to time bundle generation
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1283
1283
1284
1284
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # wrap ``fn`` so each run re-opens and re-parses the bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the parsed bundle in ``size``-byte chunks until exhausted
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        # pick the benchmarks matching the detected bundle flavor
        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1409
1409
1410
1410
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # fully consume the chunk generator so the work actually happens
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1446
1446
1447
1447
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate `hasdir` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so loading is not part of the measurement
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1464
1464
1465
1465
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before measuring
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # force a from-scratch load on each run
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1528
1528
1529
1529
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate itself; only the dirs cache is cleared per run
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1548
1548
1549
1549
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate load itself
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1569
1569
1570
1570
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate load itself
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        # the dirfoldmap is derived from _dirs; drop that too
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1594
1594
1595
1595
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate before measuring
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1613
1613
1614
1614
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1636
1636
1637
1637
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the merge action computation (`merge.calculateupdates`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1669
1669
1670
1670
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1693
1693
1694
1694
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1708
1708
1709
1709
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also drop the file cache so reading from disk is measured
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1734
1734
1735
1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer hg versions expose a richer path API; fall back otherwise
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # old index API: nodemap acts as the membership test
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1798
1798
1799
1799
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV designates a changeset: resolve it and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full 40-char hex string is taken as the manifest node itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # modern Mercurial exposes manifest storage via getstorage();
                # fall back to the private _revlog on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches so every run
        # measures a cold manifest read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1843
1843
1844
1844
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve REV to its node once, outside the timed section
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1857
1857
1858
1858
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop cached dirstate data so each run loads the ignore rules cold
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # merely accessing the property triggers parsing of the ignore files
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1875
1875
1876
1876
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE: after _byteskwargs() all keys are bytes; the previous code
        # read opts['rev'] with a str key, which raised KeyError on Python 3
        # instead of the intended Abort when --no-lookup was combined with
        # --rev.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # rebuild the changelog (index creation) then perform the lookups
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1939
1939
1940
1940
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # modern indexes expose get_rev(); fall back to the legacy nodemap
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild a cold nodemap before every timed run

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
2011
2011
2012
2012
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark spawning the current hg executable (`hg version -q`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # neutralize HGRCPATH so config loading does not skew the measurement
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: no empty-variable prefix syntax; set it in the env
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
2029
2029
2030
2030
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    ``version`` is one of ``b'v1'``, ``b'v2'``, ``b'v3-exp'`` or
    ``b'latest'`` (the highest non-experimental version available).

    Returns a callable taking a repo and returning an iterable of stream
    chunks. Raises ``error.Abort`` when the requested version is unknown or
    not provided by this Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            # generatev1 returns (entry-count, byte-count, chunk-iterator)
            entries, bytes, data = generatev1(repo)
            return data

        # FIX: the previous code registered the raw ``generatev1`` (whose
        # return value is a 3-tuple, not a chunk iterator) and discarded a
        # wrapper that mistakenly called generatev2; register the correct
        # wrapper so callers can iterate the produced stream.
        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078
2078
2079
2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        # drop the previous run's generator outside the timed section
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
2113
2113
2114
2114
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # NOTE(review): help text has a typo ("to us") and omits the
            # "v3-exp" value accepted by _find_stream_generator — candidate
            # for a behavior-affecting follow-up change.
            b'stream version to us ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2145
2145
2146
2146
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # [0] = open bundle file object, [1] = fresh target directory (bytes);
    # populated by the context manager before each run and cleared after
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2224
2224
2225
2225
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # number of commits the benchmark iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    node_list = [repo.changelog.node(idx) for idx in _xrange(count)]

    def run():
        lookup_parents = repo.changelog.parents
        for node in node_list:
            lookup_parents(node)

    timer(run)
    fm.end()
2251
2251
2252
2252
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a change context"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
2264
2264
2265
2265
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def run():
        # field 3 of a changelog entry is the list of touched files
        len(changelog.read(rev)[3])

    timer(run)
    fm.end()
2278
2278
2279
2279
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
2286
2286
2287
2287
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long, reproducible series of random edits to a
    linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation benchmarks the same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace random hunk [a1, a2) of the current content with random
        # source range [b1, b2); track the resulting line count
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2325
2325
2326
2326
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(scmutil.revrange(repo, specs))

    timer(run)
    fm.end()
2334
2334
2335
2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision number with cold caches"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        # modern revlog API takes a radix ...
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # ... older versions take the index file name instead
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the caches so the next run performs a cold lookup again
        clearcaches(cl)

    timer(d)
    fm.end()
2356
2356
2357
2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command output so printing does not pollute the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
2375
2375
2376
2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        # branch() forces the changelog payload to be read, not just the
        # index entry
        for r in repo.changelog.revs(start=len(repo) - 1, stop=-1):
            repo[r].branch()

    timer(walkback)
    fm.end()
2393
2393
2394
2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # bail out early on Mercurial versions without makelogtemplater
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output is discarded so that terminal I/O does
    # not dominate the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()

    revspec = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revspec))

    if testedtemplate is None:
        # a representative log-style template
        testedtemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
2437
2437
2438
2438
2439 def _displaystats(ui, opts, entries, data):
2439 def _displaystats(ui, opts, entries, data):
2440 # use a second formatter because the data are quite different, not sure
2440 # use a second formatter because the data are quite different, not sure
2441 # how it flies with the templater.
2441 # how it flies with the templater.
2442 fm = ui.formatter(b'perf-stats', opts)
2442 fm = ui.formatter(b'perf-stats', opts)
2443 for key, title in entries:
2443 for key, title in entries:
2444 values = data[key]
2444 values = data[key]
2445 nbvalues = len(data)
2445 nbvalues = len(data)
2446 values.sort()
2446 values.sort()
2447 stats = {
2447 stats = {
2448 'key': key,
2448 'key': key,
2449 'title': title,
2449 'title': title,
2450 'nbitems': len(values),
2450 'nbitems': len(values),
2451 'min': values[0][0],
2451 'min': values[0][0],
2452 '10%': values[(nbvalues * 10) // 100][0],
2452 '10%': values[(nbvalues * 10) // 100][0],
2453 '25%': values[(nbvalues * 25) // 100][0],
2453 '25%': values[(nbvalues * 25) // 100][0],
2454 '50%': values[(nbvalues * 50) // 100][0],
2454 '50%': values[(nbvalues * 50) // 100][0],
2455 '75%': values[(nbvalues * 75) // 100][0],
2455 '75%': values[(nbvalues * 75) // 100][0],
2456 '80%': values[(nbvalues * 80) // 100][0],
2456 '80%': values[(nbvalues * 80) // 100][0],
2457 '85%': values[(nbvalues * 85) // 100][0],
2457 '85%': values[(nbvalues * 85) // 100][0],
2458 '90%': values[(nbvalues * 90) // 100][0],
2458 '90%': values[(nbvalues * 90) // 100][0],
2459 '95%': values[(nbvalues * 95) // 100][0],
2459 '95%': values[(nbvalues * 95) // 100][0],
2460 '99%': values[(nbvalues * 99) // 100][0],
2460 '99%': values[(nbvalues * 99) // 100][0],
2461 'max': values[-1][0],
2461 'max': values[-1][0],
2462 }
2462 }
2463 fm.startitem()
2463 fm.startitem()
2464 fm.data(**stats)
2464 fm.data(**stats)
2465 # make node pretty for the human output
2465 # make node pretty for the human output
2466 fm.plain('### %s (%d items)\n' % (title, len(values)))
2466 fm.plain('### %s (%d items)\n' % (title, len(values)))
2467 lines = [
2467 lines = [
2468 'min',
2468 'min',
2469 '10%',
2469 '10%',
2470 '25%',
2470 '25%',
2471 '50%',
2471 '50%',
2472 '75%',
2472 '75%',
2473 '80%',
2473 '80%',
2474 '85%',
2474 '85%',
2475 '90%',
2475 '90%',
2476 '95%',
2476 '95%',
2477 '99%',
2477 '99%',
2478 'max',
2478 'max',
2479 ]
2479 ]
2480 for l in lines:
2480 for l in lines:
2481 fm.plain('%s: %s\n' % (l, stats[l]))
2481 fm.plain('%s: %s\n' % (l, stats[l]))
2482 fm.end()
2482 fm.end()
2483
2483
2484
2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style format keyed into the per-triplet `data`
    # dict) pairs driving both the header and each output row
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing and rename columns are only available with --timing
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    # the mutable default for `revs` is safe: it is rebound here, never
    # mutated in place
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw measurements accumulated for _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # each common ancestor head is a candidate merge base, giving one
        # (base, p1, p2) triplet per base
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                # only sides with missing files contribute to the stats
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                # time each parent's path-copy detection separately
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2666
2666
2667
2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the rename-count and timing columns only exist with --timing, so the
    # header/row formats differ between the two modes
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # the mutable default for `revs` is safe: it is rebound here, never
    # mutated in place
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw measurements accumulated for _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits are relevant: they provide (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (common ancestor, parent) pair for both parents
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file to trace, the pair is not interesting
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2806
2806
2807
2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # time constructing a case-collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
2814
2814
2815
2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # time parsing the fncache file from disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2827
2827
2828
2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark rewriting the fncache file inside a transaction; requires
    # the repository lock
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # re-mark the cache dirty each run, otherwise only the first
            # write() would actually touch the disk
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        # the lock previously leaked when the transaction setup or the
        # benchmark raised; always release it
        lock.release()
    fm.end()
2847
2847
2848
2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # time store-encoding every path currently listed in the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2862
2862
2863
2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Consume text pairs from queue ``q`` and diff them until told to stop.

    Worker loop for the threaded mode of ``perfbdiff``.  ``blocks`` and
    ``xdiff`` select which diff routine is exercised.  A ``None`` item on the
    queue marks the end of one benchmark round; ``done`` is an event set when
    the workers should exit for good, and ``ready`` is the condition used to
    park workers between rounds.
    """
    while not done.is_set():
        pair = q.get()
        # drain the queue until the None sentinel that ends this round
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # block until the coordinator signals the start of the next round
        with ready:
            ready.wait()
2879
2879
2880
2880
def _manifestrevision(repo, mnode):
    """Return the full text of manifest node ``mnode`` from ``repo``."""
    manifestlog = repo.manifestlog
    # modern Mercurial exposes manifest storage via getstorage(); older
    # versions only had the private _revlog attribute
    if not util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog._revlog.revision(mnode)
    return manifestlog.getstorage(b'').revision(mnode)
2890
2890
2891
2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only applies to the block-diff code path
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    # --alldata implies walking the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    use_blocks = opts[b'blocks']
    use_xdiff = opts[b'xdiff']
    pairs = []

    rlog = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = rlog.rev(rlog.lookup(rev))
    for cur in range(startrev, min(startrev + count, len(rlog) - 1)):
        if opts[b'alldata']:
            # Collect every text pair touched by this changeset: the
            # manifest against each parent manifest...
            ctx = repo[cur]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # ...and each changed file against its previous version,
            # discovered by diffing the two manifests.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            # Just this revision against its delta parent.
            base = rlog.deltaparent(cur)
            pairs.append((rlog.revision(base), rlog.revision(cur)))

    withthreads = threads > 0
    if withthreads:
        # Spin the worker pool up once; each timed run then feeds the
        # queue and wakes the workers through the condition variable.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker,
                args=(q, use_blocks, use_xdiff, ready, done),
            ).start()
        q.join()

        def d():
            for pair in pairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    else:

        def d():
            # Single-threaded path: diff each pair inline.
            for pair in pairs:
                if use_xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif use_blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Signal shutdown and wake every worker so threads can exit.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3006
3006
3007
3007
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The bundle is re-read and a fresh transaction is opened before each
    timed run; every transaction is aborted afterward so the repository
    is left unchanged.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort the transaction left over by the previous
                        # run (if any) before opening a fresh one
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # Bug fix: this previously read `repo.ui.quiet == orig_quiet`,
                # a comparison with no effect that left the ui quiet forever.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3086 mercurial.revlog._maxinline = old_max_inline
3087
3087
3088
3088
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    # --alldata implies walking the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    rlog = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rlog.rev(rlog.lookup(rev))
    for cur in range(startrev, min(startrev + count, len(rlog) - 1)):
        if opts[b'alldata']:
            # Collect the manifest text against each parent manifest...
            ctx = repo[cur]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # ...and each changed file against its previous version,
            # discovered by diffing the two manifests.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            # Just this revision against its delta parent.
            base = rlog.deltaparent(cur)
            pairs.append((rlog.revision(base), rlog.revision(cur)))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3167
3167
3168
3168
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter flag -> commands.diff keyword argument
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagnames[c]: b'1' for c in combo}

        def d(diffargs=diffargs):
            # buffer the output so we only time the diff computation
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        key = combo.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + key) if key else b'none')
        timer(d, title=title)
    fm.end()
3192
3192
3193
3193
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rlog = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rlog, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rlog, 'radix', None)
    indexfile = getattr(rlog, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rlog, 'indexfile')
    idxdata = opener.read(indexfile)

    header = struct.unpack(b'>I', idxdata[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort(b'unsupported revlog version: %d' % version)
    inline = header & (1 << 16)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8 keeps the index parser on the revlogio class
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rlog)

    # probe nodes at fixed positions across the index
    node0 = rlog.node(0)
    node25 = rlog.node(rllen // 4)
    node50 = rlog.node(rllen // 2)
    node75 = rlog.node(rllen // 4 * 3)
    node100 = rlog.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rlog.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(idxdata, inline)

    def getentry(revornode):
        index = parse_index_v1(idxdata, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(idxdata, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(idxdata, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                parse_index_v1(idxdata, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(idxdata, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                parse_index_v1(idxdata, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3339
3339
3340
3340
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rlog = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rlog)

    # a negative start revision counts back from the end
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rlog.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # walk the same range from the other end
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rlog.node(x)
            rlog.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3389
3389
3390
3390
3391 @command(
3391 @command(
3392 b'perf::revlogwrite|perfrevlogwrite',
3392 b'perf::revlogwrite|perfrevlogwrite',
3393 revlogopts
3393 revlogopts
3394 + formatteropts
3394 + formatteropts
3395 + [
3395 + [
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3403 ],
3403 ],
3404 b'-c|-m|FILE',
3404 b'-c|-m|FILE',
3405 )
3405 )
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3407 """Benchmark writing a series of revisions to a revlog.
3407 """Benchmark writing a series of revisions to a revlog.
3408
3408
3409 Possible source values are:
3409 Possible source values are:
3410 * `full`: add from a full text (default).
3410 * `full`: add from a full text (default).
3411 * `parent-1`: add from a delta to the first parent
3411 * `parent-1`: add from a delta to the first parent
3412 * `parent-2`: add from a delta to the second parent if it exists
3412 * `parent-2`: add from a delta to the second parent if it exists
3413 (use a delta from the first parent otherwise)
3413 (use a delta from the first parent otherwise)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3415 * `storage`: add from the existing precomputed deltas
3415 * `storage`: add from the existing precomputed deltas
3416
3416
3417 Note: This performance command measures performance in a custom way. As a
3417 Note: This performance command measures performance in a custom way. As a
3418 result some of the global configuration of the 'perf' command does not
3418 result some of the global configuration of the 'perf' command does not
3419 apply to it:
3419 apply to it:
3420
3420
3421 * ``pre-run``: disabled
3421 * ``pre-run``: disabled
3422
3422
3423 * ``profile-benchmark``: disabled
3423 * ``profile-benchmark``: disabled
3424
3424
3425 * ``run-limits``: disabled use --count instead
3425 * ``run-limits``: disabled use --count instead
3426 """
3426 """
3427 opts = _byteskwargs(opts)
3427 opts = _byteskwargs(opts)
3428
3428
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3430 rllen = getlen(ui)(rl)
3430 rllen = getlen(ui)(rl)
3431 if startrev < 0:
3431 if startrev < 0:
3432 startrev = rllen + startrev
3432 startrev = rllen + startrev
3433 if stoprev < 0:
3433 if stoprev < 0:
3434 stoprev = rllen + stoprev
3434 stoprev = rllen + stoprev
3435
3435
3436 lazydeltabase = opts['lazydeltabase']
3436 lazydeltabase = opts['lazydeltabase']
3437 source = opts['source']
3437 source = opts['source']
3438 clearcaches = opts['clear_caches']
3438 clearcaches = opts['clear_caches']
3439 validsource = (
3439 validsource = (
3440 b'full',
3440 b'full',
3441 b'parent-1',
3441 b'parent-1',
3442 b'parent-2',
3442 b'parent-2',
3443 b'parent-smallest',
3443 b'parent-smallest',
3444 b'storage',
3444 b'storage',
3445 )
3445 )
3446 if source not in validsource:
3446 if source not in validsource:
3447 raise error.Abort('invalid source type: %s' % source)
3447 raise error.Abort('invalid source type: %s' % source)
3448
3448
3449 ### actually gather results
3449 ### actually gather results
3450 count = opts['count']
3450 count = opts['count']
3451 if count <= 0:
3451 if count <= 0:
3452 raise error.Abort('invalide run count: %d' % count)
3452 raise error.Abort('invalide run count: %d' % count)
3453 allresults = []
3453 allresults = []
3454 for c in range(count):
3454 for c in range(count):
3455 timing = _timeonewrite(
3455 timing = _timeonewrite(
3456 ui,
3456 ui,
3457 rl,
3457 rl,
3458 source,
3458 source,
3459 startrev,
3459 startrev,
3460 stoprev,
3460 stoprev,
3461 c + 1,
3461 c + 1,
3462 lazydeltabase=lazydeltabase,
3462 lazydeltabase=lazydeltabase,
3463 clearcaches=clearcaches,
3463 clearcaches=clearcaches,
3464 )
3464 )
3465 allresults.append(timing)
3465 allresults.append(timing)
3466
3466
3467 ### consolidate the results in a single list
3467 ### consolidate the results in a single list
3468 results = []
3468 results = []
3469 for idx, (rev, t) in enumerate(allresults[0]):
3469 for idx, (rev, t) in enumerate(allresults[0]):
3470 ts = [t]
3470 ts = [t]
3471 for other in allresults[1:]:
3471 for other in allresults[1:]:
3472 orev, ot = other[idx]
3472 orev, ot = other[idx]
3473 assert orev == rev
3473 assert orev == rev
3474 ts.append(ot)
3474 ts.append(ot)
3475 results.append((rev, ts))
3475 results.append((rev, ts))
3476 resultcount = len(results)
3476 resultcount = len(results)
3477
3477
3478 ### Compute and display relevant statistics
3478 ### Compute and display relevant statistics
3479
3479
3480 # get a formatter
3480 # get a formatter
3481 fm = ui.formatter(b'perf', opts)
3481 fm = ui.formatter(b'perf', opts)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3483
3483
3484 # print individual details if requested
3484 # print individual details if requested
3485 if opts['details']:
3485 if opts['details']:
3486 for idx, item in enumerate(results, 1):
3486 for idx, item in enumerate(results, 1):
3487 rev, data = item
3487 rev, data = item
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3489 formatone(fm, data, title=title, displayall=displayall)
3489 formatone(fm, data, title=title, displayall=displayall)
3490
3490
3491 # sorts results by median time
3491 # sorts results by median time
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3493 # list of (name, index) to display)
3493 # list of (name, index) to display)
3494 relevants = [
3494 relevants = [
3495 ("min", 0),
3495 ("min", 0),
3496 ("10%", resultcount * 10 // 100),
3496 ("10%", resultcount * 10 // 100),
3497 ("25%", resultcount * 25 // 100),
3497 ("25%", resultcount * 25 // 100),
3498 ("50%", resultcount * 70 // 100),
3498 ("50%", resultcount * 70 // 100),
3499 ("75%", resultcount * 75 // 100),
3499 ("75%", resultcount * 75 // 100),
3500 ("90%", resultcount * 90 // 100),
3500 ("90%", resultcount * 90 // 100),
3501 ("95%", resultcount * 95 // 100),
3501 ("95%", resultcount * 95 // 100),
3502 ("99%", resultcount * 99 // 100),
3502 ("99%", resultcount * 99 // 100),
3503 ("99.9%", resultcount * 999 // 1000),
3503 ("99.9%", resultcount * 999 // 1000),
3504 ("99.99%", resultcount * 9999 // 10000),
3504 ("99.99%", resultcount * 9999 // 10000),
3505 ("99.999%", resultcount * 99999 // 100000),
3505 ("99.999%", resultcount * 99999 // 100000),
3506 ("max", -1),
3506 ("max", -1),
3507 ]
3507 ]
3508 if not ui.quiet:
3508 if not ui.quiet:
3509 for name, idx in relevants:
3509 for name, idx in relevants:
3510 data = results[idx]
3510 data = results[idx]
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3512 formatone(fm, data[1], title=title, displayall=displayall)
3512 formatone(fm, data[1], title=title, displayall=displayall)
3513
3513
3514 # XXX summing that many float will not be very precise, we ignore this fact
3514 # XXX summing that many float will not be very precise, we ignore this fact
3515 # for now
3515 # for now
3516 totaltime = []
3516 totaltime = []
3517 for item in allresults:
3517 for item in allresults:
3518 totaltime.append(
3518 totaltime.append(
3519 (
3519 (
3520 sum(x[1][0] for x in item),
3520 sum(x[1][0] for x in item),
3521 sum(x[1][1] for x in item),
3521 sum(x[1][1] for x in item),
3522 sum(x[1][2] for x in item),
3522 sum(x[1][2] for x in item),
3523 )
3523 )
3524 )
3524 )
3525 formatone(
3525 formatone(
3526 fm,
3526 fm,
3527 totaltime,
3527 totaltime,
3528 title="total time (%d revs)" % resultcount,
3528 title="total time (%d revs)" % resultcount,
3529 displayall=displayall,
3529 displayall=displayall,
3530 )
3530 )
3531 fm.end()
3531 fm.end()
3532
3532
3533
3533
class _faketr:
    """Minimal stand-in for a Mercurial transaction.

    ``addrawrevision`` expects a transaction exposing an ``add`` method;
    during benchmarking nothing must actually be journaled, so the method
    accepts the call and does nothing.
    """

    def add(self, x, y, z=None):
        # Deliberate no-op: no journal entry is recorded for the benchmark.
        return None
3537
3537
3538
3538
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions ``startrev..stoprev`` of ``orig`` into a throwaway
    copy, timing each ``addrawrevision`` call.

    ``source`` selects how each revision payload is seeded (full text,
    a parent delta, or the stored delta) -- see ``_getrevisionseed``.
    ``runidx``, when given, is only used to label the progress bar.
    Returns a list of ``(rev, timing)`` pairs.
    """
    timings = []
    # addrawrevision requires a transaction object; a no-op fake is enough
    # because the temporary revlog is discarded afterwards.
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # Drop both index-level and revlog-level caches so every
                # write is measured from a cold state.
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # timeone() exposes the measurement as r[0] once the block exits.
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3588
3588
3589
3589
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for
    re-adding revision ``rev`` of ``orig``.

    ``source`` picks the seeding strategy: ``b'full'`` supplies the full
    text; the ``parent-*`` variants supply a delta against a parent;
    ``b'storage'`` reuses the delta base actually stored in the revlog.
    """
    from mercurial.node import nullid

    link = orig.linkrev(rev)
    node = orig.node(rev)
    parent1, parent2 = orig.parents(node)
    revflags = orig.flags(rev)
    delta = None
    fulltext = None

    if source == b'full':
        fulltext = orig.revision(rev)
    elif source == b'parent-1':
        delta = (orig.rev(parent1), orig.revdiff(parent1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = parent1 if parent2 == nullid else parent2
        delta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the smaller delta
        base = parent1
        diff = orig.revdiff(parent1, rev)
        if parent2 != nullid:
            otherdiff = orig.revdiff(parent2, rev)
            if len(otherdiff) < len(diff):
                base = parent2
                diff = otherdiff
        delta = (orig.rev(base), diff)
    elif source == b'storage':
        base = orig.deltaparent(rev)
        delta = (base, orig.revdiff(orig.node(base), rev))

    args = (fulltext, tr, link, parent1, parent2)
    kwargs = {'node': node, 'flags': revflags, 'cachedelta': delta}
    return (args, kwargs)
3630
3630
3631
3631
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable temporary copy of ``orig`` truncated so that
    revisions ``>= truncaterev`` are absent and can be re-added.

    The copy lives in a fresh temporary directory which is removed on
    exit, whatever happens.  Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # 'upperboundcomp' only exists on newer revlogs; forward it when present.
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # '_datafile' on modern revlogs, 'datafile' on older versions
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # keep exactly `truncaterev` index entries
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older revlog constructors take explicit file names instead of
            # a radix
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3692
3692
3693
3693
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit list: probe every engine that is available and
        # actually implements revlog compression.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        """Yield a cached file handle for `rl`, or None when the modern
        reading() API manages the handle internally."""
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # BUGFIX: the first lookup must target the modern '_datafile'
            # attribute (matching _temprevlog); looking up 'datafile' twice
            # broke the fallback and raised AttributeError on modern revlogs.
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        # one segment lookup per revision, cold caches
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread but reusing a single file descriptor when possible
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        # a single segment lookup covering the whole revision range
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        # read + decompress every chunk individually
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    rl._chunk(rev, df=fh)
            else:
                for rev in revs:
                    rl._chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = rl._chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = rl._chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        # Capture the previous engine before entering the try block so the
        # finally clause can never hit an unbound `oldcompressor`.
        oldcompressor = rl._compressor
        rl._compressor = compressor
        try:
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3860
3860
3861
3861
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        """Slice the already-read segments back into per-revision chunks."""
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog index objects
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # may be None on revlog versions without sparse-read support
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    # Resolve the sparse-read flag from either the modern data_config
    # object or the legacy `_withsparseread` attribute.
    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    # BUGFIX: the chain is only sliced when sparse-read is enabled (this is
    # what revlog._chunks does); the refactor inverted this condition, which
    # also called slicechunk -- possibly None on old revlog versions -- in
    # the non-sparse case.
    if not with_sparse_read:
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
4020
4026
4021
4027
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on revset execution. Volatile caches
    hold filtering and obsolescence related data."""
    # NOTE: the help text above previously referred to a non-existent
    # ``--clean`` option; the registered flag is ``-C/--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # force full changectx construction for each revision
            for ctx in repo.set(expr):
                pass
        else:
            # only iterate the raw revision numbers
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
4053
4059
4054
4060
4055 @command(
4061 @command(
4056 b'perf::volatilesets|perfvolatilesets',
4062 b'perf::volatilesets|perfvolatilesets',
4057 [
4063 [
4058 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4064 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4059 ]
4065 ]
4060 + formatteropts,
4066 + formatteropts,
4061 )
4067 )
4062 def perfvolatilesets(ui, repo, *names, **opts):
4068 def perfvolatilesets(ui, repo, *names, **opts):
4063 """benchmark the computation of various volatile set
4069 """benchmark the computation of various volatile set
4064
4070
4065 Volatile set computes element related to filtering and obsolescence."""
4071 Volatile set computes element related to filtering and obsolescence."""
4066 opts = _byteskwargs(opts)
4072 opts = _byteskwargs(opts)
4067 timer, fm = gettimer(ui, opts)
4073 timer, fm = gettimer(ui, opts)
4068 repo = repo.unfiltered()
4074 repo = repo.unfiltered()
4069
4075
4070 def getobs(name):
4076 def getobs(name):
4071 def d():
4077 def d():
4072 repo.invalidatevolatilesets()
4078 repo.invalidatevolatilesets()
4073 if opts[b'clear_obsstore']:
4079 if opts[b'clear_obsstore']:
4074 clearfilecache(repo, b'obsstore')
4080 clearfilecache(repo, b'obsstore')
4075 obsolete.getrevs(repo, name)
4081 obsolete.getrevs(repo, name)
4076
4082
4077 return d
4083 return d
4078
4084
4079 allobs = sorted(obsolete.cachefuncs)
4085 allobs = sorted(obsolete.cachefuncs)
4080 if names:
4086 if names:
4081 allobs = [n for n in allobs if n in names]
4087 allobs = [n for n in allobs if n in names]
4082
4088
4083 for name in allobs:
4089 for name in allobs:
4084 timer(getobs(name), title=name)
4090 timer(getobs(name), title=name)
4085
4091
4086 def getfiltered(name):
4092 def getfiltered(name):
4087 def d():
4093 def d():
4088 repo.invalidatevolatilesets()
4094 repo.invalidatevolatilesets()
4089 if opts[b'clear_obsstore']:
4095 if opts[b'clear_obsstore']:
4090 clearfilecache(repo, b'obsstore')
4096 clearfilecache(repo, b'obsstore')
4091 repoview.filterrevs(repo, name)
4097 repoview.filterrevs(repo, name)
4092
4098
4093 return d
4099 return d
4094
4100
4095 allfilter = sorted(repoview.filtertable)
4101 allfilter = sorted(repoview.filtertable)
4096 if names:
4102 if names:
4097 allfilter = [n for n in allfilter if n in names]
4103 allfilter = [n for n in allfilter if n in names]
4098
4104
4099 for name in allfilter:
4105 for name in allfilter:
4100 timer(getfiltered(name), title=name)
4106 timer(getfiltered(name), title=name)
4101 fm.end()
4107 fm.end()
4102
4108
4103
4109
4104 @command(
4110 @command(
4105 b'perf::branchmap|perfbranchmap',
4111 b'perf::branchmap|perfbranchmap',
4106 [
4112 [
4107 (b'f', b'full', False, b'Includes build time of subset'),
4113 (b'f', b'full', False, b'Includes build time of subset'),
4108 (
4114 (
4109 b'',
4115 b'',
4110 b'clear-revbranch',
4116 b'clear-revbranch',
4111 False,
4117 False,
4112 b'purge the revbranch cache between computation',
4118 b'purge the revbranch cache between computation',
4113 ),
4119 ),
4114 ]
4120 ]
4115 + formatteropts,
4121 + formatteropts,
4116 )
4122 )
4117 def perfbranchmap(ui, repo, *filternames, **opts):
4123 def perfbranchmap(ui, repo, *filternames, **opts):
4118 """benchmark the update of a branchmap
4124 """benchmark the update of a branchmap
4119
4125
4120 This benchmarks the full repo.branchmap() call with read and write disabled
4126 This benchmarks the full repo.branchmap() call with read and write disabled
4121 """
4127 """
4122 opts = _byteskwargs(opts)
4128 opts = _byteskwargs(opts)
4123 full = opts.get(b"full", False)
4129 full = opts.get(b"full", False)
4124 clear_revbranch = opts.get(b"clear_revbranch", False)
4130 clear_revbranch = opts.get(b"clear_revbranch", False)
4125 timer, fm = gettimer(ui, opts)
4131 timer, fm = gettimer(ui, opts)
4126
4132
4127 def getbranchmap(filtername):
4133 def getbranchmap(filtername):
4128 """generate a benchmark function for the filtername"""
4134 """generate a benchmark function for the filtername"""
4129 if filtername is None:
4135 if filtername is None:
4130 view = repo
4136 view = repo
4131 else:
4137 else:
4132 view = repo.filtered(filtername)
4138 view = repo.filtered(filtername)
4133 if util.safehasattr(view._branchcaches, '_per_filter'):
4139 if util.safehasattr(view._branchcaches, '_per_filter'):
4134 filtered = view._branchcaches._per_filter
4140 filtered = view._branchcaches._per_filter
4135 else:
4141 else:
4136 # older versions
4142 # older versions
4137 filtered = view._branchcaches
4143 filtered = view._branchcaches
4138
4144
4139 def d():
4145 def d():
4140 if clear_revbranch:
4146 if clear_revbranch:
4141 repo.revbranchcache()._clear()
4147 repo.revbranchcache()._clear()
4142 if full:
4148 if full:
4143 view._branchcaches.clear()
4149 view._branchcaches.clear()
4144 else:
4150 else:
4145 filtered.pop(filtername, None)
4151 filtered.pop(filtername, None)
4146 view.branchmap()
4152 view.branchmap()
4147
4153
4148 return d
4154 return d
4149
4155
4150 # add filter in smaller subset to bigger subset
4156 # add filter in smaller subset to bigger subset
4151 possiblefilters = set(repoview.filtertable)
4157 possiblefilters = set(repoview.filtertable)
4152 if filternames:
4158 if filternames:
4153 possiblefilters &= set(filternames)
4159 possiblefilters &= set(filternames)
4154 subsettable = getbranchmapsubsettable()
4160 subsettable = getbranchmapsubsettable()
4155 allfilters = []
4161 allfilters = []
4156 while possiblefilters:
4162 while possiblefilters:
4157 for name in possiblefilters:
4163 for name in possiblefilters:
4158 subset = subsettable.get(name)
4164 subset = subsettable.get(name)
4159 if subset not in possiblefilters:
4165 if subset not in possiblefilters:
4160 break
4166 break
4161 else:
4167 else:
4162 assert False, b'subset cycle %s!' % possiblefilters
4168 assert False, b'subset cycle %s!' % possiblefilters
4163 allfilters.append(name)
4169 allfilters.append(name)
4164 possiblefilters.remove(name)
4170 possiblefilters.remove(name)
4165
4171
4166 # warm the cache
4172 # warm the cache
4167 if not full:
4173 if not full:
4168 for name in allfilters:
4174 for name in allfilters:
4169 repo.filtered(name).branchmap()
4175 repo.filtered(name).branchmap()
4170 if not filternames or b'unfiltered' in filternames:
4176 if not filternames or b'unfiltered' in filternames:
4171 # add unfiltered
4177 # add unfiltered
4172 allfilters.append(None)
4178 allfilters.append(None)
4173
4179
4174 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4180 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4175 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4181 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4176 branchcacheread.set(classmethod(lambda *args: None))
4182 branchcacheread.set(classmethod(lambda *args: None))
4177 else:
4183 else:
4178 # older versions
4184 # older versions
4179 branchcacheread = safeattrsetter(branchmap, b'read')
4185 branchcacheread = safeattrsetter(branchmap, b'read')
4180 branchcacheread.set(lambda *args: None)
4186 branchcacheread.set(lambda *args: None)
4181 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4187 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4182 branchcachewrite.set(lambda *args: None)
4188 branchcachewrite.set(lambda *args: None)
4183 try:
4189 try:
4184 for name in allfilters:
4190 for name in allfilters:
4185 printname = name
4191 printname = name
4186 if name is None:
4192 if name is None:
4187 printname = b'unfiltered'
4193 printname = b'unfiltered'
4188 timer(getbranchmap(name), title=printname)
4194 timer(getbranchmap(name), title=printname)
4189 finally:
4195 finally:
4190 branchcacheread.restore()
4196 branchcacheread.restore()
4191 branchcachewrite.restore()
4197 branchcachewrite.restore()
4192 fm.end()
4198 fm.end()
4193
4199
4194
4200
4195 @command(
4201 @command(
4196 b'perf::branchmapupdate|perfbranchmapupdate',
4202 b'perf::branchmapupdate|perfbranchmapupdate',
4197 [
4203 [
4198 (b'', b'base', [], b'subset of revision to start from'),
4204 (b'', b'base', [], b'subset of revision to start from'),
4199 (b'', b'target', [], b'subset of revision to end with'),
4205 (b'', b'target', [], b'subset of revision to end with'),
4200 (b'', b'clear-caches', False, b'clear cache between each runs'),
4206 (b'', b'clear-caches', False, b'clear cache between each runs'),
4201 ]
4207 ]
4202 + formatteropts,
4208 + formatteropts,
4203 )
4209 )
4204 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4210 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4205 """benchmark branchmap update from for <base> revs to <target> revs
4211 """benchmark branchmap update from for <base> revs to <target> revs
4206
4212
4207 If `--clear-caches` is passed, the following items will be reset before
4213 If `--clear-caches` is passed, the following items will be reset before
4208 each update:
4214 each update:
4209 * the changelog instance and associated indexes
4215 * the changelog instance and associated indexes
4210 * the rev-branch-cache instance
4216 * the rev-branch-cache instance
4211
4217
4212 Examples:
4218 Examples:
4213
4219
4214 # update for the one last revision
4220 # update for the one last revision
4215 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4221 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4216
4222
4217 $ update for change coming with a new branch
4223 $ update for change coming with a new branch
4218 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4224 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4219 """
4225 """
4220 from mercurial import branchmap
4226 from mercurial import branchmap
4221 from mercurial import repoview
4227 from mercurial import repoview
4222
4228
4223 opts = _byteskwargs(opts)
4229 opts = _byteskwargs(opts)
4224 timer, fm = gettimer(ui, opts)
4230 timer, fm = gettimer(ui, opts)
4225 clearcaches = opts[b'clear_caches']
4231 clearcaches = opts[b'clear_caches']
4226 unfi = repo.unfiltered()
4232 unfi = repo.unfiltered()
4227 x = [None] # used to pass data between closure
4233 x = [None] # used to pass data between closure
4228
4234
4229 # we use a `list` here to avoid possible side effect from smartset
4235 # we use a `list` here to avoid possible side effect from smartset
4230 baserevs = list(scmutil.revrange(repo, base))
4236 baserevs = list(scmutil.revrange(repo, base))
4231 targetrevs = list(scmutil.revrange(repo, target))
4237 targetrevs = list(scmutil.revrange(repo, target))
4232 if not baserevs:
4238 if not baserevs:
4233 raise error.Abort(b'no revisions selected for --base')
4239 raise error.Abort(b'no revisions selected for --base')
4234 if not targetrevs:
4240 if not targetrevs:
4235 raise error.Abort(b'no revisions selected for --target')
4241 raise error.Abort(b'no revisions selected for --target')
4236
4242
4237 # make sure the target branchmap also contains the one in the base
4243 # make sure the target branchmap also contains the one in the base
4238 targetrevs = list(set(baserevs) | set(targetrevs))
4244 targetrevs = list(set(baserevs) | set(targetrevs))
4239 targetrevs.sort()
4245 targetrevs.sort()
4240
4246
4241 cl = repo.changelog
4247 cl = repo.changelog
4242 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4248 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4243 allbaserevs.sort()
4249 allbaserevs.sort()
4244 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4250 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4245
4251
4246 newrevs = list(alltargetrevs.difference(allbaserevs))
4252 newrevs = list(alltargetrevs.difference(allbaserevs))
4247 newrevs.sort()
4253 newrevs.sort()
4248
4254
4249 allrevs = frozenset(unfi.changelog.revs())
4255 allrevs = frozenset(unfi.changelog.revs())
4250 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4256 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4251 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4257 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4252
4258
4253 def basefilter(repo, visibilityexceptions=None):
4259 def basefilter(repo, visibilityexceptions=None):
4254 return basefilterrevs
4260 return basefilterrevs
4255
4261
4256 def targetfilter(repo, visibilityexceptions=None):
4262 def targetfilter(repo, visibilityexceptions=None):
4257 return targetfilterrevs
4263 return targetfilterrevs
4258
4264
4259 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4265 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4260 ui.status(msg % (len(allbaserevs), len(newrevs)))
4266 ui.status(msg % (len(allbaserevs), len(newrevs)))
4261 if targetfilterrevs:
4267 if targetfilterrevs:
4262 msg = b'(%d revisions still filtered)\n'
4268 msg = b'(%d revisions still filtered)\n'
4263 ui.status(msg % len(targetfilterrevs))
4269 ui.status(msg % len(targetfilterrevs))
4264
4270
4265 try:
4271 try:
4266 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4272 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4267 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4273 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4268
4274
4269 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4275 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4270 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4276 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4271
4277
4272 # try to find an existing branchmap to reuse
4278 # try to find an existing branchmap to reuse
4273 subsettable = getbranchmapsubsettable()
4279 subsettable = getbranchmapsubsettable()
4274 candidatefilter = subsettable.get(None)
4280 candidatefilter = subsettable.get(None)
4275 while candidatefilter is not None:
4281 while candidatefilter is not None:
4276 candidatebm = repo.filtered(candidatefilter).branchmap()
4282 candidatebm = repo.filtered(candidatefilter).branchmap()
4277 if candidatebm.validfor(baserepo):
4283 if candidatebm.validfor(baserepo):
4278 filtered = repoview.filterrevs(repo, candidatefilter)
4284 filtered = repoview.filterrevs(repo, candidatefilter)
4279 missing = [r for r in allbaserevs if r in filtered]
4285 missing = [r for r in allbaserevs if r in filtered]
4280 base = candidatebm.copy()
4286 base = candidatebm.copy()
4281 base.update(baserepo, missing)
4287 base.update(baserepo, missing)
4282 break
4288 break
4283 candidatefilter = subsettable.get(candidatefilter)
4289 candidatefilter = subsettable.get(candidatefilter)
4284 else:
4290 else:
4285 # no suitable subset where found
4291 # no suitable subset where found
4286 base = branchmap.branchcache()
4292 base = branchmap.branchcache()
4287 base.update(baserepo, allbaserevs)
4293 base.update(baserepo, allbaserevs)
4288
4294
4289 def setup():
4295 def setup():
4290 x[0] = base.copy()
4296 x[0] = base.copy()
4291 if clearcaches:
4297 if clearcaches:
4292 unfi._revbranchcache = None
4298 unfi._revbranchcache = None
4293 clearchangelog(repo)
4299 clearchangelog(repo)
4294
4300
4295 def bench():
4301 def bench():
4296 x[0].update(targetrepo, newrevs)
4302 x[0].update(targetrepo, newrevs)
4297
4303
4298 timer(bench, setup=setup)
4304 timer(bench, setup=setup)
4299 fm.end()
4305 fm.end()
4300 finally:
4306 finally:
4301 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4307 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4302 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4308 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4303
4309
4304
4310
4305 @command(
4311 @command(
4306 b'perf::branchmapload|perfbranchmapload',
4312 b'perf::branchmapload|perfbranchmapload',
4307 [
4313 [
4308 (b'f', b'filter', b'', b'Specify repoview filter'),
4314 (b'f', b'filter', b'', b'Specify repoview filter'),
4309 (b'', b'list', False, b'List brachmap filter caches'),
4315 (b'', b'list', False, b'List brachmap filter caches'),
4310 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4316 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4311 ]
4317 ]
4312 + formatteropts,
4318 + formatteropts,
4313 )
4319 )
4314 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4320 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4315 """benchmark reading the branchmap"""
4321 """benchmark reading the branchmap"""
4316 opts = _byteskwargs(opts)
4322 opts = _byteskwargs(opts)
4317 clearrevlogs = opts[b'clear_revlogs']
4323 clearrevlogs = opts[b'clear_revlogs']
4318
4324
4319 if list:
4325 if list:
4320 for name, kind, st in repo.cachevfs.readdir(stat=True):
4326 for name, kind, st in repo.cachevfs.readdir(stat=True):
4321 if name.startswith(b'branch2'):
4327 if name.startswith(b'branch2'):
4322 filtername = name.partition(b'-')[2] or b'unfiltered'
4328 filtername = name.partition(b'-')[2] or b'unfiltered'
4323 ui.status(
4329 ui.status(
4324 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4330 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4325 )
4331 )
4326 return
4332 return
4327 if not filter:
4333 if not filter:
4328 filter = None
4334 filter = None
4329 subsettable = getbranchmapsubsettable()
4335 subsettable = getbranchmapsubsettable()
4330 if filter is None:
4336 if filter is None:
4331 repo = repo.unfiltered()
4337 repo = repo.unfiltered()
4332 else:
4338 else:
4333 repo = repoview.repoview(repo, filter)
4339 repo = repoview.repoview(repo, filter)
4334
4340
4335 repo.branchmap() # make sure we have a relevant, up to date branchmap
4341 repo.branchmap() # make sure we have a relevant, up to date branchmap
4336
4342
4337 try:
4343 try:
4338 fromfile = branchmap.branchcache.fromfile
4344 fromfile = branchmap.branchcache.fromfile
4339 except AttributeError:
4345 except AttributeError:
4340 # older versions
4346 # older versions
4341 fromfile = branchmap.read
4347 fromfile = branchmap.read
4342
4348
4343 currentfilter = filter
4349 currentfilter = filter
4344 # try once without timer, the filter may not be cached
4350 # try once without timer, the filter may not be cached
4345 while fromfile(repo) is None:
4351 while fromfile(repo) is None:
4346 currentfilter = subsettable.get(currentfilter)
4352 currentfilter = subsettable.get(currentfilter)
4347 if currentfilter is None:
4353 if currentfilter is None:
4348 raise error.Abort(
4354 raise error.Abort(
4349 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4355 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4350 )
4356 )
4351 repo = repo.filtered(currentfilter)
4357 repo = repo.filtered(currentfilter)
4352 timer, fm = gettimer(ui, opts)
4358 timer, fm = gettimer(ui, opts)
4353
4359
4354 def setup():
4360 def setup():
4355 if clearrevlogs:
4361 if clearrevlogs:
4356 clearchangelog(repo)
4362 clearchangelog(repo)
4357
4363
4358 def bench():
4364 def bench():
4359 fromfile(repo)
4365 fromfile(repo)
4360
4366
4361 timer(bench, setup=setup)
4367 timer(bench, setup=setup)
4362 fm.end()
4368 fm.end()
4363
4369
4364
4370
4365 @command(b'perf::loadmarkers|perfloadmarkers')
4371 @command(b'perf::loadmarkers|perfloadmarkers')
4366 def perfloadmarkers(ui, repo):
4372 def perfloadmarkers(ui, repo):
4367 """benchmark the time to parse the on-disk markers for a repo
4373 """benchmark the time to parse the on-disk markers for a repo
4368
4374
4369 Result is the number of markers in the repo."""
4375 Result is the number of markers in the repo."""
4370 timer, fm = gettimer(ui)
4376 timer, fm = gettimer(ui)
4371 svfs = getsvfs(repo)
4377 svfs = getsvfs(repo)
4372 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4378 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4373 fm.end()
4379 fm.end()
4374
4380
4375
4381
4376 @command(
4382 @command(
4377 b'perf::lrucachedict|perflrucachedict',
4383 b'perf::lrucachedict|perflrucachedict',
4378 formatteropts
4384 formatteropts
4379 + [
4385 + [
4380 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4386 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4381 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4387 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4382 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4388 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4383 (b'', b'size', 4, b'size of cache'),
4389 (b'', b'size', 4, b'size of cache'),
4384 (b'', b'gets', 10000, b'number of key lookups'),
4390 (b'', b'gets', 10000, b'number of key lookups'),
4385 (b'', b'sets', 10000, b'number of key sets'),
4391 (b'', b'sets', 10000, b'number of key sets'),
4386 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4392 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4387 (
4393 (
4388 b'',
4394 b'',
4389 b'mixedgetfreq',
4395 b'mixedgetfreq',
4390 50,
4396 50,
4391 b'frequency of get vs set ops in mixed mode',
4397 b'frequency of get vs set ops in mixed mode',
4392 ),
4398 ),
4393 ],
4399 ],
4394 norepo=True,
4400 norepo=True,
4395 )
4401 )
4396 def perflrucache(
4402 def perflrucache(
4397 ui,
4403 ui,
4398 mincost=0,
4404 mincost=0,
4399 maxcost=100,
4405 maxcost=100,
4400 costlimit=0,
4406 costlimit=0,
4401 size=4,
4407 size=4,
4402 gets=10000,
4408 gets=10000,
4403 sets=10000,
4409 sets=10000,
4404 mixed=10000,
4410 mixed=10000,
4405 mixedgetfreq=50,
4411 mixedgetfreq=50,
4406 **opts
4412 **opts
4407 ):
4413 ):
4408 opts = _byteskwargs(opts)
4414 opts = _byteskwargs(opts)
4409
4415
4410 def doinit():
4416 def doinit():
4411 for i in _xrange(10000):
4417 for i in _xrange(10000):
4412 util.lrucachedict(size)
4418 util.lrucachedict(size)
4413
4419
4414 costrange = list(range(mincost, maxcost + 1))
4420 costrange = list(range(mincost, maxcost + 1))
4415
4421
4416 values = []
4422 values = []
4417 for i in _xrange(size):
4423 for i in _xrange(size):
4418 values.append(random.randint(0, _maxint))
4424 values.append(random.randint(0, _maxint))
4419
4425
4420 # Get mode fills the cache and tests raw lookup performance with no
4426 # Get mode fills the cache and tests raw lookup performance with no
4421 # eviction.
4427 # eviction.
4422 getseq = []
4428 getseq = []
4423 for i in _xrange(gets):
4429 for i in _xrange(gets):
4424 getseq.append(random.choice(values))
4430 getseq.append(random.choice(values))
4425
4431
4426 def dogets():
4432 def dogets():
4427 d = util.lrucachedict(size)
4433 d = util.lrucachedict(size)
4428 for v in values:
4434 for v in values:
4429 d[v] = v
4435 d[v] = v
4430 for key in getseq:
4436 for key in getseq:
4431 value = d[key]
4437 value = d[key]
4432 value # silence pyflakes warning
4438 value # silence pyflakes warning
4433
4439
4434 def dogetscost():
4440 def dogetscost():
4435 d = util.lrucachedict(size, maxcost=costlimit)
4441 d = util.lrucachedict(size, maxcost=costlimit)
4436 for i, v in enumerate(values):
4442 for i, v in enumerate(values):
4437 d.insert(v, v, cost=costs[i])
4443 d.insert(v, v, cost=costs[i])
4438 for key in getseq:
4444 for key in getseq:
4439 try:
4445 try:
4440 value = d[key]
4446 value = d[key]
4441 value # silence pyflakes warning
4447 value # silence pyflakes warning
4442 except KeyError:
4448 except KeyError:
4443 pass
4449 pass
4444
4450
4445 # Set mode tests insertion speed with cache eviction.
4451 # Set mode tests insertion speed with cache eviction.
4446 setseq = []
4452 setseq = []
4447 costs = []
4453 costs = []
4448 for i in _xrange(sets):
4454 for i in _xrange(sets):
4449 setseq.append(random.randint(0, _maxint))
4455 setseq.append(random.randint(0, _maxint))
4450 costs.append(random.choice(costrange))
4456 costs.append(random.choice(costrange))
4451
4457
4452 def doinserts():
4458 def doinserts():
4453 d = util.lrucachedict(size)
4459 d = util.lrucachedict(size)
4454 for v in setseq:
4460 for v in setseq:
4455 d.insert(v, v)
4461 d.insert(v, v)
4456
4462
4457 def doinsertscost():
4463 def doinsertscost():
4458 d = util.lrucachedict(size, maxcost=costlimit)
4464 d = util.lrucachedict(size, maxcost=costlimit)
4459 for i, v in enumerate(setseq):
4465 for i, v in enumerate(setseq):
4460 d.insert(v, v, cost=costs[i])
4466 d.insert(v, v, cost=costs[i])
4461
4467
4462 def dosets():
4468 def dosets():
4463 d = util.lrucachedict(size)
4469 d = util.lrucachedict(size)
4464 for v in setseq:
4470 for v in setseq:
4465 d[v] = v
4471 d[v] = v
4466
4472
4467 # Mixed mode randomly performs gets and sets with eviction.
4473 # Mixed mode randomly performs gets and sets with eviction.
4468 mixedops = []
4474 mixedops = []
4469 for i in _xrange(mixed):
4475 for i in _xrange(mixed):
4470 r = random.randint(0, 100)
4476 r = random.randint(0, 100)
4471 if r < mixedgetfreq:
4477 if r < mixedgetfreq:
4472 op = 0
4478 op = 0
4473 else:
4479 else:
4474 op = 1
4480 op = 1
4475
4481
4476 mixedops.append(
4482 mixedops.append(
4477 (op, random.randint(0, size * 2), random.choice(costrange))
4483 (op, random.randint(0, size * 2), random.choice(costrange))
4478 )
4484 )
4479
4485
4480 def domixed():
4486 def domixed():
4481 d = util.lrucachedict(size)
4487 d = util.lrucachedict(size)
4482
4488
4483 for op, v, cost in mixedops:
4489 for op, v, cost in mixedops:
4484 if op == 0:
4490 if op == 0:
4485 try:
4491 try:
4486 d[v]
4492 d[v]
4487 except KeyError:
4493 except KeyError:
4488 pass
4494 pass
4489 else:
4495 else:
4490 d[v] = v
4496 d[v] = v
4491
4497
4492 def domixedcost():
4498 def domixedcost():
4493 d = util.lrucachedict(size, maxcost=costlimit)
4499 d = util.lrucachedict(size, maxcost=costlimit)
4494
4500
4495 for op, v, cost in mixedops:
4501 for op, v, cost in mixedops:
4496 if op == 0:
4502 if op == 0:
4497 try:
4503 try:
4498 d[v]
4504 d[v]
4499 except KeyError:
4505 except KeyError:
4500 pass
4506 pass
4501 else:
4507 else:
4502 d.insert(v, v, cost=cost)
4508 d.insert(v, v, cost=cost)
4503
4509
4504 benches = [
4510 benches = [
4505 (doinit, b'init'),
4511 (doinit, b'init'),
4506 ]
4512 ]
4507
4513
4508 if costlimit:
4514 if costlimit:
4509 benches.extend(
4515 benches.extend(
4510 [
4516 [
4511 (dogetscost, b'gets w/ cost limit'),
4517 (dogetscost, b'gets w/ cost limit'),
4512 (doinsertscost, b'inserts w/ cost limit'),
4518 (doinsertscost, b'inserts w/ cost limit'),
4513 (domixedcost, b'mixed w/ cost limit'),
4519 (domixedcost, b'mixed w/ cost limit'),
4514 ]
4520 ]
4515 )
4521 )
4516 else:
4522 else:
4517 benches.extend(
4523 benches.extend(
4518 [
4524 [
4519 (dogets, b'gets'),
4525 (dogets, b'gets'),
4520 (doinserts, b'inserts'),
4526 (doinserts, b'inserts'),
4521 (dosets, b'sets'),
4527 (dosets, b'sets'),
4522 (domixed, b'mixed'),
4528 (domixed, b'mixed'),
4523 ]
4529 ]
4524 )
4530 )
4525
4531
4526 for fn, title in benches:
4532 for fn, title in benches:
4527 timer, fm = gettimer(ui, opts)
4533 timer, fm = gettimer(ui, opts)
4528 timer(fn, title=title)
4534 timer(fn, title=title)
4529 fm.end()
4535 fm.end()
4530
4536
4531
4537
4532 @command(
4538 @command(
4533 b'perf::write|perfwrite',
4539 b'perf::write|perfwrite',
4534 formatteropts
4540 formatteropts
4535 + [
4541 + [
4536 (b'', b'write-method', b'write', b'ui write method'),
4542 (b'', b'write-method', b'write', b'ui write method'),
4537 (b'', b'nlines', 100, b'number of lines'),
4543 (b'', b'nlines', 100, b'number of lines'),
4538 (b'', b'nitems', 100, b'number of items (per line)'),
4544 (b'', b'nitems', 100, b'number of items (per line)'),
4539 (b'', b'item', b'x', b'item that is written'),
4545 (b'', b'item', b'x', b'item that is written'),
4540 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4546 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4541 (b'', b'flush-line', None, b'flush after each line'),
4547 (b'', b'flush-line', None, b'flush after each line'),
4542 ],
4548 ],
4543 )
4549 )
4544 def perfwrite(ui, repo, **opts):
4550 def perfwrite(ui, repo, **opts):
4545 """microbenchmark ui.write (and others)"""
4551 """microbenchmark ui.write (and others)"""
4546 opts = _byteskwargs(opts)
4552 opts = _byteskwargs(opts)
4547
4553
4548 write = getattr(ui, _sysstr(opts[b'write_method']))
4554 write = getattr(ui, _sysstr(opts[b'write_method']))
4549 nlines = int(opts[b'nlines'])
4555 nlines = int(opts[b'nlines'])
4550 nitems = int(opts[b'nitems'])
4556 nitems = int(opts[b'nitems'])
4551 item = opts[b'item']
4557 item = opts[b'item']
4552 batch_line = opts.get(b'batch_line')
4558 batch_line = opts.get(b'batch_line')
4553 flush_line = opts.get(b'flush_line')
4559 flush_line = opts.get(b'flush_line')
4554
4560
4555 if batch_line:
4561 if batch_line:
4556 line = item * nitems + b'\n'
4562 line = item * nitems + b'\n'
4557
4563
4558 def benchmark():
4564 def benchmark():
4559 for i in pycompat.xrange(nlines):
4565 for i in pycompat.xrange(nlines):
4560 if batch_line:
4566 if batch_line:
4561 write(line)
4567 write(line)
4562 else:
4568 else:
4563 for i in pycompat.xrange(nitems):
4569 for i in pycompat.xrange(nitems):
4564 write(item)
4570 write(item)
4565 write(b'\n')
4571 write(b'\n')
4566 if flush_line:
4572 if flush_line:
4567 ui.flush()
4573 ui.flush()
4568 ui.flush()
4574 ui.flush()
4569
4575
4570 timer, fm = gettimer(ui, opts)
4576 timer, fm = gettimer(ui, opts)
4571 timer(benchmark)
4577 timer(benchmark)
4572 fm.end()
4578 fm.end()
4573
4579
4574
4580
4575 def uisetup(ui):
4581 def uisetup(ui):
4576 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4582 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4577 commands, b'debugrevlogopts'
4583 commands, b'debugrevlogopts'
4578 ):
4584 ):
4579 # for "historical portability":
4585 # for "historical portability":
4580 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4586 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4581 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4587 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4582 # openrevlog() should cause failure, because it has been
4588 # openrevlog() should cause failure, because it has been
4583 # available since 3.5 (or 49c583ca48c4).
4589 # available since 3.5 (or 49c583ca48c4).
4584 def openrevlog(orig, repo, cmd, file_, opts):
4590 def openrevlog(orig, repo, cmd, file_, opts):
4585 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4591 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4586 raise error.Abort(
4592 raise error.Abort(
4587 b"This version doesn't support --dir option",
4593 b"This version doesn't support --dir option",
4588 hint=b"use 3.5 or later",
4594 hint=b"use 3.5 or later",
4589 )
4595 )
4590 return orig(repo, cmd, file_, opts)
4596 return orig(repo, cmd, file_, opts)
4591
4597
4592 name = _sysstr(b'openrevlog')
4598 name = _sysstr(b'openrevlog')
4593 extensions.wrapfunction(cmdutil, name, openrevlog)
4599 extensions.wrapfunction(cmdutil, name, openrevlog)
4594
4600
4595
4601
4596 @command(
4602 @command(
4597 b'perf::progress|perfprogress',
4603 b'perf::progress|perfprogress',
4598 formatteropts
4604 formatteropts
4599 + [
4605 + [
4600 (b'', b'topic', b'topic', b'topic for progress messages'),
4606 (b'', b'topic', b'topic', b'topic for progress messages'),
4601 (b'c', b'total', 1000000, b'total value we are progressing to'),
4607 (b'c', b'total', 1000000, b'total value we are progressing to'),
4602 ],
4608 ],
4603 norepo=True,
4609 norepo=True,
4604 )
4610 )
4605 def perfprogress(ui, topic=None, total=None, **opts):
4611 def perfprogress(ui, topic=None, total=None, **opts):
4606 """printing of progress bars"""
4612 """printing of progress bars"""
4607 opts = _byteskwargs(opts)
4613 opts = _byteskwargs(opts)
4608
4614
4609 timer, fm = gettimer(ui, opts)
4615 timer, fm = gettimer(ui, opts)
4610
4616
4611 def doprogress():
4617 def doprogress():
4612 with ui.makeprogress(topic, total=total) as progress:
4618 with ui.makeprogress(topic, total=total) as progress:
4613 for i in _xrange(total):
4619 for i in _xrange(total):
4614 progress.increment()
4620 progress.increment()
4615
4621
4616 timer(doprogress)
4622 timer(doprogress)
4617 fm.end()
4623 fm.end()
@@ -1,4836 +1,4836 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import binascii
9 import binascii
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import difflib
13 import difflib
14 import errno
14 import errno
15 import glob
15 import glob
16 import operator
16 import operator
17 import os
17 import os
18 import platform
18 import platform
19 import random
19 import random
20 import re
20 import re
21 import socket
21 import socket
22 import ssl
22 import ssl
23 import stat
23 import stat
24 import subprocess
24 import subprocess
25 import sys
25 import sys
26 import time
26 import time
27
27
28 from .i18n import _
28 from .i18n import _
29 from .node import (
29 from .node import (
30 bin,
30 bin,
31 hex,
31 hex,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from .pycompat import (
35 from .pycompat import (
36 open,
36 open,
37 )
37 )
38 from . import (
38 from . import (
39 bundle2,
39 bundle2,
40 bundlerepo,
40 bundlerepo,
41 changegroup,
41 changegroup,
42 cmdutil,
42 cmdutil,
43 color,
43 color,
44 context,
44 context,
45 copies,
45 copies,
46 dagparser,
46 dagparser,
47 dirstateutils,
47 dirstateutils,
48 encoding,
48 encoding,
49 error,
49 error,
50 exchange,
50 exchange,
51 extensions,
51 extensions,
52 filelog,
52 filelog,
53 filemerge,
53 filemerge,
54 filesetlang,
54 filesetlang,
55 formatter,
55 formatter,
56 hg,
56 hg,
57 httppeer,
57 httppeer,
58 localrepo,
58 localrepo,
59 lock as lockmod,
59 lock as lockmod,
60 logcmdutil,
60 logcmdutil,
61 manifest,
61 manifest,
62 mergestate as mergestatemod,
62 mergestate as mergestatemod,
63 metadata,
63 metadata,
64 obsolete,
64 obsolete,
65 obsutil,
65 obsutil,
66 pathutil,
66 pathutil,
67 phases,
67 phases,
68 policy,
68 policy,
69 pvec,
69 pvec,
70 pycompat,
70 pycompat,
71 registrar,
71 registrar,
72 repair,
72 repair,
73 repoview,
73 repoview,
74 requirements,
74 requirements,
75 revlog,
75 revlog,
76 revset,
76 revset,
77 revsetlang,
77 revsetlang,
78 scmutil,
78 scmutil,
79 setdiscovery,
79 setdiscovery,
80 simplemerge,
80 simplemerge,
81 sshpeer,
81 sshpeer,
82 sslutil,
82 sslutil,
83 streamclone,
83 streamclone,
84 strip,
84 strip,
85 tags as tagsmod,
85 tags as tagsmod,
86 templater,
86 templater,
87 treediscovery,
87 treediscovery,
88 upgrade,
88 upgrade,
89 url as urlmod,
89 url as urlmod,
90 util,
90 util,
91 verify,
91 verify,
92 vfs as vfsmod,
92 vfs as vfsmod,
93 wireprotoframing,
93 wireprotoframing,
94 wireprotoserver,
94 wireprotoserver,
95 )
95 )
96 from .interfaces import repository
96 from .interfaces import repository
97 from .stabletailgraph import stabletailsort
97 from .stabletailgraph import stabletailsort
98 from .utils import (
98 from .utils import (
99 cborutil,
99 cborutil,
100 compression,
100 compression,
101 dateutil,
101 dateutil,
102 procutil,
102 procutil,
103 stringutil,
103 stringutil,
104 urlutil,
104 urlutil,
105 )
105 )
106
106
107 from .revlogutils import (
107 from .revlogutils import (
108 constants as revlog_constants,
108 constants as revlog_constants,
109 debug as revlog_debug,
109 debug as revlog_debug,
110 deltas as deltautil,
110 deltas as deltautil,
111 nodemap,
111 nodemap,
112 rewrite,
112 rewrite,
113 sidedata,
113 sidedata,
114 )
114 )
115
115
116 release = lockmod.release
116 release = lockmod.release
117
117
118 table = {}
118 table = {}
119 table.update(strip.command._table)
119 table.update(strip.command._table)
120 command = registrar.command(table)
120 command = registrar.command(table)
121
121
122
122
123 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
123 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
124 def debugancestor(ui, repo, *args):
124 def debugancestor(ui, repo, *args):
125 """find the ancestor revision of two revisions in a given index"""
125 """find the ancestor revision of two revisions in a given index"""
126 if len(args) == 3:
126 if len(args) == 3:
127 index, rev1, rev2 = args
127 index, rev1, rev2 = args
128 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
128 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
129 lookup = r.lookup
129 lookup = r.lookup
130 elif len(args) == 2:
130 elif len(args) == 2:
131 if not repo:
131 if not repo:
132 raise error.Abort(
132 raise error.Abort(
133 _(b'there is no Mercurial repository here (.hg not found)')
133 _(b'there is no Mercurial repository here (.hg not found)')
134 )
134 )
135 rev1, rev2 = args
135 rev1, rev2 = args
136 r = repo.changelog
136 r = repo.changelog
137 lookup = repo.lookup
137 lookup = repo.lookup
138 else:
138 else:
139 raise error.Abort(_(b'either two or three arguments required'))
139 raise error.Abort(_(b'either two or three arguments required'))
140 a = r.ancestor(lookup(rev1), lookup(rev2))
140 a = r.ancestor(lookup(rev1), lookup(rev2))
141 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
141 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
142
142
143
143
144 @command(b'debugantivirusrunning', [])
144 @command(b'debugantivirusrunning', [])
145 def debugantivirusrunning(ui, repo):
145 def debugantivirusrunning(ui, repo):
146 """attempt to trigger an antivirus scanner to see if one is active"""
146 """attempt to trigger an antivirus scanner to see if one is active"""
147 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
147 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
148 f.write(
148 f.write(
149 util.b85decode(
149 util.b85decode(
150 # This is a base85-armored version of the EICAR test file. See
150 # This is a base85-armored version of the EICAR test file. See
151 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
151 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
152 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
152 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
153 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
153 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
154 )
154 )
155 )
155 )
156 # Give an AV engine time to scan the file.
156 # Give an AV engine time to scan the file.
157 time.sleep(2)
157 time.sleep(2)
158 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
158 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
159
159
160
160
161 @command(b'debugapplystreamclonebundle', [], b'FILE')
161 @command(b'debugapplystreamclonebundle', [], b'FILE')
162 def debugapplystreamclonebundle(ui, repo, fname):
162 def debugapplystreamclonebundle(ui, repo, fname):
163 """apply a stream clone bundle file"""
163 """apply a stream clone bundle file"""
164 f = hg.openpath(ui, fname)
164 f = hg.openpath(ui, fname)
165 gen = exchange.readbundle(ui, f, fname)
165 gen = exchange.readbundle(ui, f, fname)
166 gen.apply(repo)
166 gen.apply(repo)
167
167
168
168
169 @command(
169 @command(
170 b'debugbuilddag',
170 b'debugbuilddag',
171 [
171 [
172 (
172 (
173 b'm',
173 b'm',
174 b'mergeable-file',
174 b'mergeable-file',
175 None,
175 None,
176 _(b'add single file mergeable changes'),
176 _(b'add single file mergeable changes'),
177 ),
177 ),
178 (
178 (
179 b'o',
179 b'o',
180 b'overwritten-file',
180 b'overwritten-file',
181 None,
181 None,
182 _(b'add single file all revs overwrite'),
182 _(b'add single file all revs overwrite'),
183 ),
183 ),
184 (b'n', b'new-file', None, _(b'add new file at each rev')),
184 (b'n', b'new-file', None, _(b'add new file at each rev')),
185 (
185 (
186 b'',
186 b'',
187 b'from-existing',
187 b'from-existing',
188 None,
188 None,
189 _(b'continue from a non-empty repository'),
189 _(b'continue from a non-empty repository'),
190 ),
190 ),
191 ],
191 ],
192 _(b'[OPTION]... [TEXT]'),
192 _(b'[OPTION]... [TEXT]'),
193 )
193 )
194 def debugbuilddag(
194 def debugbuilddag(
195 ui,
195 ui,
196 repo,
196 repo,
197 text=None,
197 text=None,
198 mergeable_file=False,
198 mergeable_file=False,
199 overwritten_file=False,
199 overwritten_file=False,
200 new_file=False,
200 new_file=False,
201 from_existing=False,
201 from_existing=False,
202 ):
202 ):
203 """builds a repo with a given DAG from scratch in the current empty repo
203 """builds a repo with a given DAG from scratch in the current empty repo
204
204
205 The description of the DAG is read from stdin if not given on the
205 The description of the DAG is read from stdin if not given on the
206 command line.
206 command line.
207
207
208 Elements:
208 Elements:
209
209
210 - "+n" is a linear run of n nodes based on the current default parent
210 - "+n" is a linear run of n nodes based on the current default parent
211 - "." is a single node based on the current default parent
211 - "." is a single node based on the current default parent
212 - "$" resets the default parent to null (implied at the start);
212 - "$" resets the default parent to null (implied at the start);
213 otherwise the default parent is always the last node created
213 otherwise the default parent is always the last node created
214 - "<p" sets the default parent to the backref p
214 - "<p" sets the default parent to the backref p
215 - "*p" is a fork at parent p, which is a backref
215 - "*p" is a fork at parent p, which is a backref
216 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
216 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
217 - "/p2" is a merge of the preceding node and p2
217 - "/p2" is a merge of the preceding node and p2
218 - ":tag" defines a local tag for the preceding node
218 - ":tag" defines a local tag for the preceding node
219 - "@branch" sets the named branch for subsequent nodes
219 - "@branch" sets the named branch for subsequent nodes
220 - "#...\\n" is a comment up to the end of the line
220 - "#...\\n" is a comment up to the end of the line
221
221
222 Whitespace between the above elements is ignored.
222 Whitespace between the above elements is ignored.
223
223
224 A backref is either
224 A backref is either
225
225
226 - a number n, which references the node curr-n, where curr is the current
226 - a number n, which references the node curr-n, where curr is the current
227 node, or
227 node, or
228 - the name of a local tag you placed earlier using ":tag", or
228 - the name of a local tag you placed earlier using ":tag", or
229 - empty to denote the default parent.
229 - empty to denote the default parent.
230
230
231 All string valued-elements are either strictly alphanumeric, or must
231 All string valued-elements are either strictly alphanumeric, or must
232 be enclosed in double quotes ("..."), with "\\" as escape character.
232 be enclosed in double quotes ("..."), with "\\" as escape character.
233 """
233 """
234
234
235 if text is None:
235 if text is None:
236 ui.status(_(b"reading DAG from stdin\n"))
236 ui.status(_(b"reading DAG from stdin\n"))
237 text = ui.fin.read()
237 text = ui.fin.read()
238
238
239 cl = repo.changelog
239 cl = repo.changelog
240 if len(cl) > 0 and not from_existing:
240 if len(cl) > 0 and not from_existing:
241 raise error.Abort(_(b'repository is not empty'))
241 raise error.Abort(_(b'repository is not empty'))
242
242
243 # determine number of revs in DAG
243 # determine number of revs in DAG
244 total = 0
244 total = 0
245 for type, data in dagparser.parsedag(text):
245 for type, data in dagparser.parsedag(text):
246 if type == b'n':
246 if type == b'n':
247 total += 1
247 total += 1
248
248
249 if mergeable_file:
249 if mergeable_file:
250 linesperrev = 2
250 linesperrev = 2
251 # make a file with k lines per rev
251 # make a file with k lines per rev
252 initialmergedlines = [b'%d' % i for i in range(0, total * linesperrev)]
252 initialmergedlines = [b'%d' % i for i in range(0, total * linesperrev)]
253 initialmergedlines.append(b"")
253 initialmergedlines.append(b"")
254
254
255 tags = []
255 tags = []
256 progress = ui.makeprogress(
256 progress = ui.makeprogress(
257 _(b'building'), unit=_(b'revisions'), total=total
257 _(b'building'), unit=_(b'revisions'), total=total
258 )
258 )
259 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
259 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
260 at = -1
260 at = -1
261 atbranch = b'default'
261 atbranch = b'default'
262 nodeids = []
262 nodeids = []
263 id = 0
263 id = 0
264 progress.update(id)
264 progress.update(id)
265 for type, data in dagparser.parsedag(text):
265 for type, data in dagparser.parsedag(text):
266 if type == b'n':
266 if type == b'n':
267 ui.note((b'node %s\n' % pycompat.bytestr(data)))
267 ui.note((b'node %s\n' % pycompat.bytestr(data)))
268 id, ps = data
268 id, ps = data
269
269
270 files = []
270 files = []
271 filecontent = {}
271 filecontent = {}
272
272
273 p2 = None
273 p2 = None
274 if mergeable_file:
274 if mergeable_file:
275 fn = b"mf"
275 fn = b"mf"
276 p1 = repo[ps[0]]
276 p1 = repo[ps[0]]
277 if len(ps) > 1:
277 if len(ps) > 1:
278 p2 = repo[ps[1]]
278 p2 = repo[ps[1]]
279 pa = p1.ancestor(p2)
279 pa = p1.ancestor(p2)
280 base, local, other = [
280 base, local, other = [
281 x[fn].data() for x in (pa, p1, p2)
281 x[fn].data() for x in (pa, p1, p2)
282 ]
282 ]
283 m3 = simplemerge.Merge3Text(base, local, other)
283 m3 = simplemerge.Merge3Text(base, local, other)
284 ml = [
284 ml = [
285 l.strip()
285 l.strip()
286 for l in simplemerge.render_minimized(m3)[0]
286 for l in simplemerge.render_minimized(m3)[0]
287 ]
287 ]
288 ml.append(b"")
288 ml.append(b"")
289 elif at > 0:
289 elif at > 0:
290 ml = p1[fn].data().split(b"\n")
290 ml = p1[fn].data().split(b"\n")
291 else:
291 else:
292 ml = initialmergedlines
292 ml = initialmergedlines
293 ml[id * linesperrev] += b" r%i" % id
293 ml[id * linesperrev] += b" r%i" % id
294 mergedtext = b"\n".join(ml)
294 mergedtext = b"\n".join(ml)
295 files.append(fn)
295 files.append(fn)
296 filecontent[fn] = mergedtext
296 filecontent[fn] = mergedtext
297
297
298 if overwritten_file:
298 if overwritten_file:
299 fn = b"of"
299 fn = b"of"
300 files.append(fn)
300 files.append(fn)
301 filecontent[fn] = b"r%i\n" % id
301 filecontent[fn] = b"r%i\n" % id
302
302
303 if new_file:
303 if new_file:
304 fn = b"nf%i" % id
304 fn = b"nf%i" % id
305 files.append(fn)
305 files.append(fn)
306 filecontent[fn] = b"r%i\n" % id
306 filecontent[fn] = b"r%i\n" % id
307 if len(ps) > 1:
307 if len(ps) > 1:
308 if not p2:
308 if not p2:
309 p2 = repo[ps[1]]
309 p2 = repo[ps[1]]
310 for fn in p2:
310 for fn in p2:
311 if fn.startswith(b"nf"):
311 if fn.startswith(b"nf"):
312 files.append(fn)
312 files.append(fn)
313 filecontent[fn] = p2[fn].data()
313 filecontent[fn] = p2[fn].data()
314
314
315 def fctxfn(repo, cx, path):
315 def fctxfn(repo, cx, path):
316 if path in filecontent:
316 if path in filecontent:
317 return context.memfilectx(
317 return context.memfilectx(
318 repo, cx, path, filecontent[path]
318 repo, cx, path, filecontent[path]
319 )
319 )
320 return None
320 return None
321
321
322 if len(ps) == 0 or ps[0] < 0:
322 if len(ps) == 0 or ps[0] < 0:
323 pars = [None, None]
323 pars = [None, None]
324 elif len(ps) == 1:
324 elif len(ps) == 1:
325 pars = [nodeids[ps[0]], None]
325 pars = [nodeids[ps[0]], None]
326 else:
326 else:
327 pars = [nodeids[p] for p in ps]
327 pars = [nodeids[p] for p in ps]
328 cx = context.memctx(
328 cx = context.memctx(
329 repo,
329 repo,
330 pars,
330 pars,
331 b"r%i" % id,
331 b"r%i" % id,
332 files,
332 files,
333 fctxfn,
333 fctxfn,
334 date=(id, 0),
334 date=(id, 0),
335 user=b"debugbuilddag",
335 user=b"debugbuilddag",
336 extra={b'branch': atbranch},
336 extra={b'branch': atbranch},
337 )
337 )
338 nodeid = repo.commitctx(cx)
338 nodeid = repo.commitctx(cx)
339 nodeids.append(nodeid)
339 nodeids.append(nodeid)
340 at = id
340 at = id
341 elif type == b'l':
341 elif type == b'l':
342 id, name = data
342 id, name = data
343 ui.note((b'tag %s\n' % name))
343 ui.note((b'tag %s\n' % name))
344 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
344 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
345 elif type == b'a':
345 elif type == b'a':
346 ui.note((b'branch %s\n' % data))
346 ui.note((b'branch %s\n' % data))
347 atbranch = data
347 atbranch = data
348 progress.update(id)
348 progress.update(id)
349
349
350 if tags:
350 if tags:
351 repo.vfs.write(b"localtags", b"".join(tags))
351 repo.vfs.write(b"localtags", b"".join(tags))
352
352
353
353
354 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
354 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
355 indent_string = b' ' * indent
355 indent_string = b' ' * indent
356 if all:
356 if all:
357 ui.writenoi18n(
357 ui.writenoi18n(
358 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
358 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
359 % indent_string
359 % indent_string
360 )
360 )
361
361
362 def showchunks(named):
362 def showchunks(named):
363 ui.write(b"\n%s%s\n" % (indent_string, named))
363 ui.write(b"\n%s%s\n" % (indent_string, named))
364 for deltadata in gen.deltaiter():
364 for deltadata in gen.deltaiter():
365 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
365 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
366 ui.write(
366 ui.write(
367 b"%s%s %s %s %s %s %d\n"
367 b"%s%s %s %s %s %s %d\n"
368 % (
368 % (
369 indent_string,
369 indent_string,
370 hex(node),
370 hex(node),
371 hex(p1),
371 hex(p1),
372 hex(p2),
372 hex(p2),
373 hex(cs),
373 hex(cs),
374 hex(deltabase),
374 hex(deltabase),
375 len(delta),
375 len(delta),
376 )
376 )
377 )
377 )
378
378
379 gen.changelogheader()
379 gen.changelogheader()
380 showchunks(b"changelog")
380 showchunks(b"changelog")
381 gen.manifestheader()
381 gen.manifestheader()
382 showchunks(b"manifest")
382 showchunks(b"manifest")
383 for chunkdata in iter(gen.filelogheader, {}):
383 for chunkdata in iter(gen.filelogheader, {}):
384 fname = chunkdata[b'filename']
384 fname = chunkdata[b'filename']
385 showchunks(fname)
385 showchunks(fname)
386 else:
386 else:
387 if isinstance(gen, bundle2.unbundle20):
387 if isinstance(gen, bundle2.unbundle20):
388 raise error.Abort(_(b'use debugbundle2 for this file'))
388 raise error.Abort(_(b'use debugbundle2 for this file'))
389 gen.changelogheader()
389 gen.changelogheader()
390 for deltadata in gen.deltaiter():
390 for deltadata in gen.deltaiter():
391 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
391 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
392 ui.write(b"%s%s\n" % (indent_string, hex(node)))
392 ui.write(b"%s%s\n" % (indent_string, hex(node)))
393
393
394
394
395 def _debugobsmarkers(ui, part, indent=0, **opts):
395 def _debugobsmarkers(ui, part, indent=0, **opts):
396 """display version and markers contained in 'data'"""
396 """display version and markers contained in 'data'"""
397 data = part.read()
397 data = part.read()
398 indent_string = b' ' * indent
398 indent_string = b' ' * indent
399 try:
399 try:
400 version, markers = obsolete._readmarkers(data)
400 version, markers = obsolete._readmarkers(data)
401 except error.UnknownVersion as exc:
401 except error.UnknownVersion as exc:
402 msg = b"%sunsupported version: %s (%d bytes)\n"
402 msg = b"%sunsupported version: %s (%d bytes)\n"
403 msg %= indent_string, exc.version, len(data)
403 msg %= indent_string, exc.version, len(data)
404 ui.write(msg)
404 ui.write(msg)
405 else:
405 else:
406 msg = b"%sversion: %d (%d bytes)\n"
406 msg = b"%sversion: %d (%d bytes)\n"
407 msg %= indent_string, version, len(data)
407 msg %= indent_string, version, len(data)
408 ui.write(msg)
408 ui.write(msg)
409 fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
409 fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
410 for rawmarker in sorted(markers):
410 for rawmarker in sorted(markers):
411 m = obsutil.marker(None, rawmarker)
411 m = obsutil.marker(None, rawmarker)
412 fm.startitem()
412 fm.startitem()
413 fm.plain(indent_string)
413 fm.plain(indent_string)
414 cmdutil.showmarker(fm, m)
414 cmdutil.showmarker(fm, m)
415 fm.end()
415 fm.end()
416
416
417
417
418 def _debugphaseheads(ui, data, indent=0):
418 def _debugphaseheads(ui, data, indent=0):
419 """display version and markers contained in 'data'"""
419 """display version and markers contained in 'data'"""
420 indent_string = b' ' * indent
420 indent_string = b' ' * indent
421 headsbyphase = phases.binarydecode(data)
421 headsbyphase = phases.binarydecode(data)
422 for phase in phases.allphases:
422 for phase in phases.allphases:
423 for head in headsbyphase[phase]:
423 for head in headsbyphase[phase]:
424 ui.write(indent_string)
424 ui.write(indent_string)
425 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
425 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
426
426
427
427
428 def _quasirepr(thing):
428 def _quasirepr(thing):
429 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
429 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
430 return b'{%s}' % (
430 return b'{%s}' % (
431 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
431 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
432 )
432 )
433 return pycompat.bytestr(repr(thing))
433 return pycompat.bytestr(repr(thing))
434
434
435
435
436 def _debugbundle2(ui, gen, all=None, **opts):
436 def _debugbundle2(ui, gen, all=None, **opts):
437 """lists the contents of a bundle2"""
437 """lists the contents of a bundle2"""
438 if not isinstance(gen, bundle2.unbundle20):
438 if not isinstance(gen, bundle2.unbundle20):
439 raise error.Abort(_(b'not a bundle2 file'))
439 raise error.Abort(_(b'not a bundle2 file'))
440 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
440 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
441 parttypes = opts.get('part_type', [])
441 parttypes = opts.get('part_type', [])
442 for part in gen.iterparts():
442 for part in gen.iterparts():
443 if parttypes and part.type not in parttypes:
443 if parttypes and part.type not in parttypes:
444 continue
444 continue
445 msg = b'%s -- %s (mandatory: %r)\n'
445 msg = b'%s -- %s (mandatory: %r)\n'
446 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
446 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
447 if part.type == b'changegroup':
447 if part.type == b'changegroup':
448 version = part.params.get(b'version', b'01')
448 version = part.params.get(b'version', b'01')
449 cg = changegroup.getunbundler(version, part, b'UN')
449 cg = changegroup.getunbundler(version, part, b'UN')
450 if not ui.quiet:
450 if not ui.quiet:
451 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
451 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
452 if part.type == b'obsmarkers':
452 if part.type == b'obsmarkers':
453 if not ui.quiet:
453 if not ui.quiet:
454 _debugobsmarkers(ui, part, indent=4, **opts)
454 _debugobsmarkers(ui, part, indent=4, **opts)
455 if part.type == b'phase-heads':
455 if part.type == b'phase-heads':
456 if not ui.quiet:
456 if not ui.quiet:
457 _debugphaseheads(ui, part, indent=4)
457 _debugphaseheads(ui, part, indent=4)
458
458
459
459
460 @command(
460 @command(
461 b'debugbundle',
461 b'debugbundle',
462 [
462 [
463 (b'a', b'all', None, _(b'show all details')),
463 (b'a', b'all', None, _(b'show all details')),
464 (b'', b'part-type', [], _(b'show only the named part type')),
464 (b'', b'part-type', [], _(b'show only the named part type')),
465 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
465 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
466 ],
466 ],
467 _(b'FILE'),
467 _(b'FILE'),
468 norepo=True,
468 norepo=True,
469 )
469 )
470 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
470 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
471 """lists the contents of a bundle"""
471 """lists the contents of a bundle"""
472 with hg.openpath(ui, bundlepath) as f:
472 with hg.openpath(ui, bundlepath) as f:
473 if spec:
473 if spec:
474 spec = exchange.getbundlespec(ui, f)
474 spec = exchange.getbundlespec(ui, f)
475 ui.write(b'%s\n' % spec)
475 ui.write(b'%s\n' % spec)
476 return
476 return
477
477
478 gen = exchange.readbundle(ui, f, bundlepath)
478 gen = exchange.readbundle(ui, f, bundlepath)
479 if isinstance(gen, bundle2.unbundle20):
479 if isinstance(gen, bundle2.unbundle20):
480 return _debugbundle2(ui, gen, all=all, **opts)
480 return _debugbundle2(ui, gen, all=all, **opts)
481 _debugchangegroup(ui, gen, all=all, **opts)
481 _debugchangegroup(ui, gen, all=all, **opts)
482
482
483
483
484 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
484 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
485 def debugcapabilities(ui, path, **opts):
485 def debugcapabilities(ui, path, **opts):
486 """lists the capabilities of a remote peer"""
486 """lists the capabilities of a remote peer"""
487 peer = hg.peer(ui, pycompat.byteskwargs(opts), path)
487 peer = hg.peer(ui, pycompat.byteskwargs(opts), path)
488 try:
488 try:
489 caps = peer.capabilities()
489 caps = peer.capabilities()
490 ui.writenoi18n(b'Main capabilities:\n')
490 ui.writenoi18n(b'Main capabilities:\n')
491 for c in sorted(caps):
491 for c in sorted(caps):
492 ui.write(b' %s\n' % c)
492 ui.write(b' %s\n' % c)
493 b2caps = bundle2.bundle2caps(peer)
493 b2caps = bundle2.bundle2caps(peer)
494 if b2caps:
494 if b2caps:
495 ui.writenoi18n(b'Bundle2 capabilities:\n')
495 ui.writenoi18n(b'Bundle2 capabilities:\n')
496 for key, values in sorted(b2caps.items()):
496 for key, values in sorted(b2caps.items()):
497 ui.write(b' %s\n' % key)
497 ui.write(b' %s\n' % key)
498 for v in values:
498 for v in values:
499 ui.write(b' %s\n' % v)
499 ui.write(b' %s\n' % v)
500 finally:
500 finally:
501 peer.close()
501 peer.close()
502
502
503
503
504 @command(
504 @command(
505 b'debugchangedfiles',
505 b'debugchangedfiles',
506 [
506 [
507 (
507 (
508 b'',
508 b'',
509 b'compute',
509 b'compute',
510 False,
510 False,
511 b"compute information instead of reading it from storage",
511 b"compute information instead of reading it from storage",
512 ),
512 ),
513 ],
513 ],
514 b'REV',
514 b'REV',
515 )
515 )
516 def debugchangedfiles(ui, repo, rev, **opts):
516 def debugchangedfiles(ui, repo, rev, **opts):
517 """list the stored files changes for a revision"""
517 """list the stored files changes for a revision"""
518 ctx = logcmdutil.revsingle(repo, rev, None)
518 ctx = logcmdutil.revsingle(repo, rev, None)
519 files = None
519 files = None
520
520
521 if opts['compute']:
521 if opts['compute']:
522 files = metadata.compute_all_files_changes(ctx)
522 files = metadata.compute_all_files_changes(ctx)
523 else:
523 else:
524 sd = repo.changelog.sidedata(ctx.rev())
524 sd = repo.changelog.sidedata(ctx.rev())
525 files_block = sd.get(sidedata.SD_FILES)
525 files_block = sd.get(sidedata.SD_FILES)
526 if files_block is not None:
526 if files_block is not None:
527 files = metadata.decode_files_sidedata(sd)
527 files = metadata.decode_files_sidedata(sd)
528 if files is not None:
528 if files is not None:
529 for f in sorted(files.touched):
529 for f in sorted(files.touched):
530 if f in files.added:
530 if f in files.added:
531 action = b"added"
531 action = b"added"
532 elif f in files.removed:
532 elif f in files.removed:
533 action = b"removed"
533 action = b"removed"
534 elif f in files.merged:
534 elif f in files.merged:
535 action = b"merged"
535 action = b"merged"
536 elif f in files.salvaged:
536 elif f in files.salvaged:
537 action = b"salvaged"
537 action = b"salvaged"
538 else:
538 else:
539 action = b"touched"
539 action = b"touched"
540
540
541 copy_parent = b""
541 copy_parent = b""
542 copy_source = b""
542 copy_source = b""
543 if f in files.copied_from_p1:
543 if f in files.copied_from_p1:
544 copy_parent = b"p1"
544 copy_parent = b"p1"
545 copy_source = files.copied_from_p1[f]
545 copy_source = files.copied_from_p1[f]
546 elif f in files.copied_from_p2:
546 elif f in files.copied_from_p2:
547 copy_parent = b"p2"
547 copy_parent = b"p2"
548 copy_source = files.copied_from_p2[f]
548 copy_source = files.copied_from_p2[f]
549
549
550 data = (action, copy_parent, f, copy_source)
550 data = (action, copy_parent, f, copy_source)
551 template = b"%-8s %2s: %s, %s;\n"
551 template = b"%-8s %2s: %s, %s;\n"
552 ui.write(template % data)
552 ui.write(template % data)
553
553
554
554
555 @command(b'debugcheckstate', [], b'')
555 @command(b'debugcheckstate', [], b'')
556 def debugcheckstate(ui, repo):
556 def debugcheckstate(ui, repo):
557 """validate the correctness of the current dirstate"""
557 """validate the correctness of the current dirstate"""
558 errors = verify.verifier(repo)._verify_dirstate()
558 errors = verify.verifier(repo)._verify_dirstate()
559 if errors:
559 if errors:
560 errstr = _(b"dirstate inconsistent with current parent's manifest")
560 errstr = _(b"dirstate inconsistent with current parent's manifest")
561 raise error.Abort(errstr)
561 raise error.Abort(errstr)
562
562
563
563
564 @command(
564 @command(
565 b'debugcolor',
565 b'debugcolor',
566 [(b'', b'style', None, _(b'show all configured styles'))],
566 [(b'', b'style', None, _(b'show all configured styles'))],
567 b'hg debugcolor',
567 b'hg debugcolor',
568 )
568 )
569 def debugcolor(ui, repo, **opts):
569 def debugcolor(ui, repo, **opts):
570 """show available color, effects or style"""
570 """show available color, effects or style"""
571 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
571 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
572 if opts.get('style'):
572 if opts.get('style'):
573 return _debugdisplaystyle(ui)
573 return _debugdisplaystyle(ui)
574 else:
574 else:
575 return _debugdisplaycolor(ui)
575 return _debugdisplaycolor(ui)
576
576
577
577
578 def _debugdisplaycolor(ui):
578 def _debugdisplaycolor(ui):
579 ui = ui.copy()
579 ui = ui.copy()
580 ui._styles.clear()
580 ui._styles.clear()
581 for effect in color._activeeffects(ui).keys():
581 for effect in color._activeeffects(ui).keys():
582 ui._styles[effect] = effect
582 ui._styles[effect] = effect
583 if ui._terminfoparams:
583 if ui._terminfoparams:
584 for k, v in ui.configitems(b'color'):
584 for k, v in ui.configitems(b'color'):
585 if k.startswith(b'color.'):
585 if k.startswith(b'color.'):
586 ui._styles[k] = k[6:]
586 ui._styles[k] = k[6:]
587 elif k.startswith(b'terminfo.'):
587 elif k.startswith(b'terminfo.'):
588 ui._styles[k] = k[9:]
588 ui._styles[k] = k[9:]
589 ui.write(_(b'available colors:\n'))
589 ui.write(_(b'available colors:\n'))
590 # sort label with a '_' after the other to group '_background' entry.
590 # sort label with a '_' after the other to group '_background' entry.
591 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
591 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
592 for colorname, label in items:
592 for colorname, label in items:
593 ui.write(b'%s\n' % colorname, label=label)
593 ui.write(b'%s\n' % colorname, label=label)
594
594
595
595
def _debugdisplaystyle(ui):
    """Print every configured style label with its effects, aligned."""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        # Nothing configured; the header alone is the whole output.
        return
    # Pad every row to the widest label so effect lists line up.
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            rendered = (ui.label(e, e) for e in effects.split())
            ui.write(b', '.join(rendered))
        ui.write(b'\n')
609
609
610
610
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.

    This command creates a "version 1" stream clone, which is deprecated in
    favor of newer versions of the stream protocol. Bundles using such newer
    versions can be generated using the `hg bundle` command.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        # v1 stream bundles have no phase filtering; warn, don't refuse.
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
636
636
637
637
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # Explicit revlog file: read it directly off the filesystem.
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # Emit a node event per revision, plus a label for listed revs.
            for r in rlog:
                yield b'n', (r, [p for p in rlog.parentrevs(r) if p != -1])
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # Map rev -> list of tag names pointing at it.
            labels = {}
            for tagname, node in repo.tags().items():
                labels.setdefault(cl.rev(node), []).append(tagname)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    # Emit an annotation event whenever the branch changes.
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, [p for p in cl.parentrevs(r) if p != -1])
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for tagname in ls:
                            yield b'l', (r, tagname)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
        ui.write(b"\n")
707
707
708
708
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        # With -c/-m/--dir the positional FILE slot actually holds the REV.
        if rev is not None:
            raise error.InputError(
                _(b'cannot specify a revision with other arguments')
            )
        file_, rev = None, file_
    elif rev is None:
        raise error.InputError(_(b'please specify a revision'))
    r = cmdutil.openstorage(
        repo, b'debugdata', file_, pycompat.byteskwargs(opts)
    )
    try:
        # rawdata: undecoded revision payload, exactly as stored.
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
727
727
728
728
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended accepts the looser/legacy date formats as well.
    formats = dateutil.extendeddateformats if opts["extended"] else None
    if formats is not None:
        d = dateutil.parsedate(date, formats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        # Optional second argument: check the parsed date against a range.
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))
747
747
748
748
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``p1``: parent 1 revision number (for reference)
    :``p2``: parent 2 revision number (for reference)
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
                    - base: a full snapshot
                    - snap: an intermediate snapshot
                    - p1: a delta against the first parent
                    - p2: a delta against the second parent
                    - skip1: a delta against the same base as p1
                      (when p1 has empty delta
                    - skip2: a delta against the same base as p2
                      (when p2 has empty delta
                    - prev: a delta against the previous revision
                    - other: a delta against an arbitrary revision
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    r = cmdutil.openrevlog(
        repo, b'debugdeltachain', file_, pycompat.byteskwargs(opts)
    )
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.delta_config.general_delta
    # The legacy `_withsparseread` attribute is gone; the flag now lives on
    # the revlog's data_config object.
    withsparseread = r.data_config.with_sparse_read

    # security to avoid crash on corrupted revlogs
    total_revs = len(index)

    # rev -> total compressed size of its delta chain; lets revinfo() stop
    # walking a chain as soon as it reaches an already-measured suffix.
    chain_size_cache = {}

    def revinfo(rev):
        # Gather per-revision statistics: parents, sizes, delta type, chain.
        e = index[rev]
        compsize = e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
        uncompsize = e[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH]

        base = e[revlog_constants.ENTRY_DELTA_BASE]
        p1 = e[revlog_constants.ENTRY_PARENT_1]
        p2 = e[revlog_constants.ENTRY_PARENT_2]

        # If the parents of a revision has an empty delta, we never try to delta
        # against that parent, but directly against the delta base of that
        # parent (recursively). It avoids adding a useless entry in the chain.
        #
        # However we need to detect that as a special case for delta-type, that
        # is not simply "other".
        p1_base = p1
        if p1 != nullrev and p1 < total_revs:
            e1 = index[p1]
            while e1[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
                new_base = e1[revlog_constants.ENTRY_DELTA_BASE]
                if (
                    new_base == p1_base
                    or new_base == nullrev
                    or new_base >= total_revs
                ):
                    break
                p1_base = new_base
                e1 = index[p1_base]
        p2_base = p2
        if p2 != nullrev and p2 < total_revs:
            e2 = index[p2]
            while e2[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
                new_base = e2[revlog_constants.ENTRY_DELTA_BASE]
                if (
                    new_base == p2_base
                    or new_base == nullrev
                    or new_base >= total_revs
                ):
                    break
                p2_base = new_base
                e2 = index[p2_base]

        if generaldelta:
            if base == p1:
                deltatype = b'p1'
            elif base == p2:
                deltatype = b'p2'
            elif base == rev:
                deltatype = b'base'
            elif base == p1_base:
                deltatype = b'skip1'
            elif base == p2_base:
                deltatype = b'skip2'
            elif r.issnapshot(rev):
                deltatype = b'snap'
            elif base == rev - 1:
                deltatype = b'prev'
            else:
                deltatype = b'other'
        else:
            # Without general delta, bases are always the previous revision.
            if base == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        chain_size = 0
        for iter_rev in reversed(chain):
            cached = chain_size_cache.get(iter_rev)
            if cached is not None:
                chain_size += cached
                break
            e = index[iter_rev]
            chain_size += e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
        chain_size_cache[rev] = chain_size

        return p1, p2, compsize, uncompsize, deltatype, chain, chain_size

    fm = ui.formatter(b'debugdeltachain', pycompat.byteskwargs(opts))

    fm.plain(
        b' rev p1 p2 chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        p1, p2, comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev p1 p2 chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            p1,
            p2,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # Replay the sparse-read slicing to measure actual I/O cost.
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
997
997
998
998
@command(
    b'debug-delta-find',
    cmdutil.debugrevlogopts
    + cmdutil.formatteropts
    + [
        (
            b'',
            b'source',
            b'full',
            _(b'input data feed to the process (full, storage, p1, p2, prev)'),
        ),
    ],
    _(b'-c|-m|FILE REV'),
    optionalrepo=True,
)
def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
    """display the computation to get to a valid delta for storing REV

    This command will replay the process used to find the "best" delta to store
    a revision and display information about all the steps used to get to that
    result.

    By default, the process is fed with a the full-text for the revision. This
    can be controlled with the --source flag.

    The revision use the revision number of the target storage (not changelog
    revision number).

    note: the process is initiated from a full text of the revision to store.
    """
    # Positional args: either "REV" alone or "FILE REV".
    if arg_2 is None:
        file_, rev = None, arg_1
    else:
        file_, rev = arg_1, arg_2

    rev = int(rev)

    # Local renamed to `rlog` to avoid shadowing the module-level `revlog`.
    rlog = cmdutil.openrevlog(
        repo, b'debugdeltachain', file_, pycompat.byteskwargs(opts)
    )
    p1r, p2r = rlog.parentrevs(rev)

    # Map the --source keyword to the revision the delta search starts from.
    if source == b'full':
        base_rev = nullrev
    elif source == b'storage':
        base_rev = rlog.deltaparent(rev)
    elif source == b'p1':
        base_rev = p1r
    elif source == b'p2':
        base_rev = p2r
    elif source == b'prev':
        base_rev = rev - 1
    else:
        raise error.InputError(b"invalid --source value: %s" % source)

    revlog_debug.debug_delta_find(ui, rlog, rev, base_rev=base_rev)
1057
1057
1058
1058
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'docket',
            False,
            _(b'display the docket (metadata file) instead'),
        ),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    if opts.get("docket"):
        # --docket: dump dirstate-v2 metadata instead of the entries.
        if not repo.dirstate._use_dirstate_v2:
            raise error.Abort(_(b'dirstate v1 does not have a docket'))

        docket = repo.dirstate._map.docket
        (
            start_offset,
            root_nodes,
            nodes_with_entry,
            nodes_with_copy,
            unused_bytes,
            _unused,
            ignore_pattern,
        ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)

        ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
        ui.write(_(b"data file uuid: %s\n") % docket.uuid)
        ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
        ui.write(_(b"number of root nodes: %d\n") % root_nodes)
        ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
        ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
        ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
        ui.write(
            _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
        )
        return

    # --nodates is deprecated but still overrides --dates when given.
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:

        def keyfunc(entry):
            # Sort primarily on saved mtime, then filename as tie-breaker.
            filename, _state, _mode, _size, mtime = entry
            return (mtime, filename)

    else:
        keyfunc = None  # sort by filename
    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
    entries.sort(key=keyfunc)
    for filename, state, mode, size, mtime in entries:
        if mtime == -1:
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
            timestr = encoding.strtolocal(timestr)
        if mode & 0o20000:
            # Symlink: render mode symbolically instead of octal.
            mode = b'lnk'
        else:
            mode = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
1146
1146
1147
1147
@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    # Only the dirstate-v2 docket records a hash of the ignore patterns;
    # for v1 there is nothing to show, so the command stays silent.
    if repo.dirstate._use_dirstate_v2:
        docket = repo.dirstate._map.docket
        hash_len = 20  # 160 bits for SHA-1
        # the ignore-pattern hash is stored at the tail of the tree metadata
        hash_bytes = docket.tree_metadata[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')
1162
1162
1163
1163
@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
        (
            b'',
            b'local-as-revs',
            b"",
            b'treat local as having these revisions only',
        ),
        (
            b'',
            b'remote-as-revs',
            b"",
            b'use local as remote, with only these revisions',
        ),
    ]
    + cmdutil.remoteopts
    + cmdutil.formatteropts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
    can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.

    The following developer oriented config are relevant for people playing with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with
      remote head fetching and local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample size is
      adapted to the shape of the undecided set (it is set to the max of:
      <target-size>, len(roots(undecided)), len(heads(undecided)

    * devel.discovery.grow-sample.rate=1.05

      the rate at which the sample grow

    * devel.discovery.randomize=True

      If False, random sampling during discovery is deterministic. It is
      meant for integration tests.

    * devel.discovery.sample-size=200

      Control the initial size of the discovery sample

    * devel.discovery.sample-size.initial=100

      Control the initial size of the discovery for initial change
    """
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts["local_as_revs"]
    remote_revs = opts["remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts['seed']))

    if not remote_revs:
        # talk to a real (or default) remote peer
        path = urlutil.get_unique_pull_path_obj(
            b'debugdiscovery', ui, remoteurl
        )
        branches = (path.branch, [])
        remote = hg.peer(repo, pycompat.byteskwargs(opts), path)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
    else:
        # use the local repository itself as the "remote", restricted to the
        # requested revisions through a dedicated repoview filter
        branches = (None, [])
        remote_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        # similarly restrict the local side when requested
        local_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

    data = {}
    if opts.get('old'):
        # legacy tree-walking discovery

        def doit(pushedrevs, remoteheads, remote=remote):
            if not hasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            if remote_revs:
                r = remote._repo.filtered(b'debug-discovery-remote-filter')
                remote._repo = r
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get('nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:
        # modern set-based discovery

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = logcmdutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            # `_any` avoids shadowing the `any` builtin; the value is unused
            common, _any, hds = setdiscovery.findcommonheads(
                ui,
                repo,
                remote,
                ancestorsof=nodes,
                audit=data,
                abortwhenunrelated=False,
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']

    fm = ui.formatter(b'debugdiscovery', pycompat.byteskwargs(opts))
    if fm.strict_format:
        # strict formatters (e.g. json) must not be polluted by the
        # discovery chatter, so capture it into the report instead

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    if len(common) == 1 and repo.nullid in common:
        common = set()
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: they cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    # `all_revs` avoids shadowing the `all` builtin
    all_revs = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    assert len(common) + len(missing) == len(all_revs)

    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all_revs)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time:  %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips:           %(total-roundtrips)9d\n" % data)
    if b'total-round-trips-heads' in data:
        fm.plain(
            b"  round-trips-heads:   %(total-round-trips-heads)9d\n" % data
        )
    if b'total-round-trips-branches' in data:
        fm.plain(
            b"  round-trips-branches: %(total-round-trips-branches)9d\n"
            % data
        )
    if b'total-round-trips-between' in data:
        fm.plain(
            b"  round-trips-between: %(total-round-trips-between)9d\n" % data
        )
    fm.plain(b"queries:               %(total-queries)9d\n" % data)
    if b'total-queries-branches' in data:
        fm.plain(b"  queries-branches:    %(total-queries-branches)9d\n" % data)
    if b'total-queries-between' in data:
        fm.plain(b"  queries-between:     %(total-queries-between)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    fm.plain(b"    also local heads:  %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    both:              %(nb-common-heads-both)9d\n" % data)
    fm.plain(b"  local heads:         %(nb-head-local)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    missing:           %(nb-head-local-missing)9d\n" % data)
    fm.plain(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    unknown:           %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets:      %(nb-revs)9d\n" % data)
    fm.plain(b"  common:              %(nb-revs-common)9d\n" % data)
    fm.plain(b"    heads:             %(nb-common-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-common-roots)9d\n" % data)
    fm.plain(b"  missing:             %(nb-revs-missing)9d\n" % data)
    fm.plain(b"    heads:             %(nb-missing-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-missing-roots)9d\n" % data)
    fm.plain(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b"    common:            %(nb-ini_und-common)9d\n" % data)
    fm.plain(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()
1444
1444
1445
1445
_chunksize = 4 << 10  # 4 KiB read/write buffer


@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        # NOTE(review): a bytes mode (b"wb") is passed to the built-in
        # open(), whose mode argument is normally a str — confirm this
        # branch is exercised / whether "wb" is intended.
        dest = open(output, b"wb", _chunksize)
    try:
        # stream in fixed-size chunks to keep memory bounded
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        try:
            # always release the source handle (was previously leaked)
            fh.close()
        finally:
            if output:
                dest.close()
1471
1471
1472
1472
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', pycompat.byteskwargs(opts))
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        # figure out where the extension was loaded from
        if hasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            # frozen (PyOxidizer) builds have no __file__; point at the binary
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
        if isinternal or hgver in exttestedwith:
            fm.plain(b'\n')
        elif not exttestedwith:
            fm.plain(_(b' (untested!)\n'))
        else:
            # flag extensions last tested with an older Mercurial
            lasttestedversion = exttestedwith[-1]
            fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1533
1533
1534
1534
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize

    ctx = logcmdutil.revsingle(repo, opts.get('rev'), None)

    # parsing pipeline: each stage transforms the tree produced by the
    # previous one
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts['show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            # the bare "parsed" dump (from --verbose) gets no header
            if opts['show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
1630
1630
1631
1631
@command(
    b"debug-repair-issue6528",
    [
        (
            b'',
            b'to-report',
            b'',
            _(b'build a report of affected revisions to this file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'from-report',
            b'',
            _(b'repair revisions listed in this report file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'paranoid',
            False,
            _(b'check that both detection methods do the same thing'),
        ),
    ]
    + cmdutil.dryrunopts,
)
def debug_repair_issue6528(ui, repo, **opts):
    """find affected revisions and repair them. See issue6528 for more details.

    The `--to-report` and `--from-report` flags allow you to cache and reuse the
    computation of affected revisions for a given repository across clones.
    The report format is line-based (with empty lines ignored):

    ```
    <ascii-hex of the affected revision>,... <unencoded filelog index filename>
    ```

    There can be multiple broken revisions per filelog, they are separated by
    a comma with no spaces. The only space is between the revision(s) and the
    filename.

    Note that this does *not* mean that this repairs future affected revisions,
    that needs a separate fix at the exchange level that was introduced in
    Mercurial 5.9.1.

    There is a `--paranoid` flag to test that the fast implementation is correct
    by checking it against the slow implementation. Since this matter is quite
    urgent and testing every edge-case is probably quite costly, we use this
    method to test on large repositories as a fuzzing method of sorts.
    """
    # Producing a report and consuming one (or doing a dry run) are
    # mutually exclusive modes of operation.
    cmdutil.check_incompatible_arguments(
        opts, 'to_report', ['from_report', 'dry_run']
    )
    # TODO maybe add filelog pattern and revision pattern parameters to help
    # narrow down the search for users that know what they're looking for?

    # issue6528 only affects revlogv1 repositories; refuse anything else.
    if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
        msg = b"can only repair revlogv1 repositories, v2 is not affected"
        raise error.Abort(_(msg))

    rewrite.repair_issue6528(
        ui,
        repo,
        dry_run=opts.get('dry_run'),
        to_report=opts.get('to_report'),
        from_report=opts.get('from_report'),
        paranoid=opts.get('paranoid'),
    )
1704
1704
1705
1705
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    # Width of the name column: longest variant name, but never narrower
    # than the b'format-variant' column header itself.
    maxvariantlength = max(
        max(len(fv.name) for fv in upgrade.allformatvariant),
        len(b'format-variant'),
    )

    def makeformatname(name):
        # Pad so every name occupies the full column width.
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', pycompat.byteskwargs(opts))
    if fm.isplain():

        def formatvalue(value):
            # Byte strings pass through unchanged; everything else is
            # rendered as a yes/no flag.
            if hasattr(value, 'startswith'):
                return value
            return b'yes' if value else b'no'

    else:
        formatvalue = pycompat.identity

    # Header row.
    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')

    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # Pick labels highlighting disagreement between the repository,
        # the configuration and the built-in default.
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        configlabel = (
            b'formatvariant.config.special'
            if fv.default != configvalue
            else b'formatvariant.config.default'
        )
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1775
1775
1776
1776
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""

    def yesno(flag):
        # Render a probe result as b'yes' / b'no'.
        return b'yes' if flag else b'no'

    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % yesno(util.checkexec(path)))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(b'symlink: %s\n' % yesno(util.checklink(path)))
    ui.writenoi18n(b'hardlink: %s\n' % yesno(util.checknlink(path)))
    casesensitive = b'(unknown)'
    try:
        # Probe case sensitivity with a temporary file; if the probe fails
        # (e.g. permissions), keep reporting b'(unknown)'.
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = yesno(util.fscasesensitive(f.name))
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1799
1799
1800
1800
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    peer = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
    if not peer.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    getbundleargs = {}
    if common:
        getbundleargs['common'] = [bin(s) for s in common]
    if head:
        getbundleargs['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    getbundleargs['bundlecaps'] = None
    bundle = peer.getbundle(b'debug', **getbundleargs)

    # Map the user-facing compression name onto the internal bundle type.
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(opts.get('type', b'bzip2').lower())
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1846
1846
1847
1847
@command(b'debugignore', [], b'[FILE]...')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
        return

    matcher = scmutil.match(repo[None], pats=files)
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for f in matcher.files():
        nf = util.normpath(f)
        ignored = None
        ignoredata = None
        if nf != b'.':
            if ignore(nf):
                # The file itself matches an ignore rule.
                ignored = nf
                ignoredata = repo.dirstate._ignorefileandline(nf)
            else:
                # Otherwise it may be ignored via a containing directory.
                for p in pathutil.finddirs(nf):
                    if ignore(p):
                        ignored = p
                        ignoredata = repo.dirstate._ignorefileandline(p)
                        break
        if not ignored:
            ui.write(_(b"%s is not ignored\n") % uipathfn(f))
            continue
        if ignored == nf:
            ui.write(_(b"%s is ignored\n") % uipathfn(f))
        else:
            ui.write(
                _(
                    b"%s is ignored because of "
                    b"containing directory %s\n"
                )
                % (uipathfn(f), ignored)
            )
        ignorefile, lineno, line = ignoredata
        ui.write(
            _(b"(ignore rule in %s, line %d: '%s')\n")
            % (ignorefile, lineno, line)
        )
1896
1896
1897
1897
@command(
    b'debug-revlog-index|debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a revlog"""
    byteopts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, byteopts)
    fm = ui.formatter(b'debugindex', byteopts)

    # Unwrap the underlying revlog when the store exposes one via a
    # `_revlog` attribute; otherwise use the store directly.
    target = getattr(store, '_revlog', store)

    return revlog_debug.debug_index(
        ui,
        repo,
        formatter=fm,
        revlog=target,
        full_node=ui.debugflag,
    )
1919
1919
1920
1920
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    store = cmdutil.openstorage(
        repo, b'debugindexdot', file_, pycompat.byteskwargs(opts)
    )
    ui.writenoi18n(b"digraph G {\n")
    for rev in store:
        p1, p2 = store.parents(store.node(rev))
        # One edge per parent; the second parent edge is only drawn when
        # it is not the null id.
        ui.write(b"\t%d -> %d\n" % (store.rev(p1), rev))
        if p2 != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (store.rev(p2), rev))
    ui.write(b"}\n")
1940
1940
1941
1941
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # NOTE(review): this call appears to be here to force the changelog
    # index to be loaded before stats are gathered — confirm.
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    if not hasattr(index, 'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for name, value in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (name, value))
1951
1951
1952
1952
1953 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1953 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1954 def debuginstall(ui, **opts):
1954 def debuginstall(ui, **opts):
1955 """test Mercurial installation
1955 """test Mercurial installation
1956
1956
1957 Returns 0 on success.
1957 Returns 0 on success.
1958 """
1958 """
1959 problems = 0
1959 problems = 0
1960
1960
1961 fm = ui.formatter(b'debuginstall', pycompat.byteskwargs(opts))
1961 fm = ui.formatter(b'debuginstall', pycompat.byteskwargs(opts))
1962 fm.startitem()
1962 fm.startitem()
1963
1963
1964 # encoding might be unknown or wrong. don't translate these messages.
1964 # encoding might be unknown or wrong. don't translate these messages.
1965 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1965 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1966 err = None
1966 err = None
1967 try:
1967 try:
1968 codecs.lookup(pycompat.sysstr(encoding.encoding))
1968 codecs.lookup(pycompat.sysstr(encoding.encoding))
1969 except LookupError as inst:
1969 except LookupError as inst:
1970 err = stringutil.forcebytestr(inst)
1970 err = stringutil.forcebytestr(inst)
1971 problems += 1
1971 problems += 1
1972 fm.condwrite(
1972 fm.condwrite(
1973 err,
1973 err,
1974 b'encodingerror',
1974 b'encodingerror',
1975 b" %s\n (check that your locale is properly set)\n",
1975 b" %s\n (check that your locale is properly set)\n",
1976 err,
1976 err,
1977 )
1977 )
1978
1978
1979 # Python
1979 # Python
1980 pythonlib = None
1980 pythonlib = None
1981 if hasattr(os, '__file__'):
1981 if hasattr(os, '__file__'):
1982 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1982 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1983 elif getattr(sys, 'oxidized', False):
1983 elif getattr(sys, 'oxidized', False):
1984 pythonlib = pycompat.sysexecutable
1984 pythonlib = pycompat.sysexecutable
1985
1985
1986 fm.write(
1986 fm.write(
1987 b'pythonexe',
1987 b'pythonexe',
1988 _(b"checking Python executable (%s)\n"),
1988 _(b"checking Python executable (%s)\n"),
1989 pycompat.sysexecutable or _(b"unknown"),
1989 pycompat.sysexecutable or _(b"unknown"),
1990 )
1990 )
1991 fm.write(
1991 fm.write(
1992 b'pythonimplementation',
1992 b'pythonimplementation',
1993 _(b"checking Python implementation (%s)\n"),
1993 _(b"checking Python implementation (%s)\n"),
1994 pycompat.sysbytes(platform.python_implementation()),
1994 pycompat.sysbytes(platform.python_implementation()),
1995 )
1995 )
1996 fm.write(
1996 fm.write(
1997 b'pythonver',
1997 b'pythonver',
1998 _(b"checking Python version (%s)\n"),
1998 _(b"checking Python version (%s)\n"),
1999 (b"%d.%d.%d" % sys.version_info[:3]),
1999 (b"%d.%d.%d" % sys.version_info[:3]),
2000 )
2000 )
2001 fm.write(
2001 fm.write(
2002 b'pythonlib',
2002 b'pythonlib',
2003 _(b"checking Python lib (%s)...\n"),
2003 _(b"checking Python lib (%s)...\n"),
2004 pythonlib or _(b"unknown"),
2004 pythonlib or _(b"unknown"),
2005 )
2005 )
2006
2006
2007 try:
2007 try:
2008 from . import rustext # pytype: disable=import-error
2008 from . import rustext # pytype: disable=import-error
2009
2009
2010 rustext.__doc__ # trigger lazy import
2010 rustext.__doc__ # trigger lazy import
2011 except ImportError:
2011 except ImportError:
2012 rustext = None
2012 rustext = None
2013
2013
2014 security = set(sslutil.supportedprotocols)
2014 security = set(sslutil.supportedprotocols)
2015 if sslutil.hassni:
2015 if sslutil.hassni:
2016 security.add(b'sni')
2016 security.add(b'sni')
2017
2017
2018 fm.write(
2018 fm.write(
2019 b'pythonsecurity',
2019 b'pythonsecurity',
2020 _(b"checking Python security support (%s)\n"),
2020 _(b"checking Python security support (%s)\n"),
2021 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
2021 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
2022 )
2022 )
2023
2023
2024 # These are warnings, not errors. So don't increment problem count. This
2024 # These are warnings, not errors. So don't increment problem count. This
2025 # may change in the future.
2025 # may change in the future.
2026 if b'tls1.2' not in security:
2026 if b'tls1.2' not in security:
2027 fm.plain(
2027 fm.plain(
2028 _(
2028 _(
2029 b' TLS 1.2 not supported by Python install; '
2029 b' TLS 1.2 not supported by Python install; '
2030 b'network connections lack modern security\n'
2030 b'network connections lack modern security\n'
2031 )
2031 )
2032 )
2032 )
2033 if b'sni' not in security:
2033 if b'sni' not in security:
2034 fm.plain(
2034 fm.plain(
2035 _(
2035 _(
2036 b' SNI not supported by Python install; may have '
2036 b' SNI not supported by Python install; may have '
2037 b'connectivity issues with some servers\n'
2037 b'connectivity issues with some servers\n'
2038 )
2038 )
2039 )
2039 )
2040
2040
2041 fm.plain(
2041 fm.plain(
2042 _(
2042 _(
2043 b"checking Rust extensions (%s)\n"
2043 b"checking Rust extensions (%s)\n"
2044 % (b'missing' if rustext is None else b'installed')
2044 % (b'missing' if rustext is None else b'installed')
2045 ),
2045 ),
2046 )
2046 )
2047
2047
2048 # TODO print CA cert info
2048 # TODO print CA cert info
2049
2049
2050 # hg version
2050 # hg version
2051 hgver = util.version()
2051 hgver = util.version()
2052 fm.write(
2052 fm.write(
2053 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
2053 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
2054 )
2054 )
2055 fm.write(
2055 fm.write(
2056 b'hgverextra',
2056 b'hgverextra',
2057 _(b"checking Mercurial custom build (%s)\n"),
2057 _(b"checking Mercurial custom build (%s)\n"),
2058 b'+'.join(hgver.split(b'+')[1:]),
2058 b'+'.join(hgver.split(b'+')[1:]),
2059 )
2059 )
2060
2060
2061 # compiled modules
2061 # compiled modules
2062 hgmodules = None
2062 hgmodules = None
2063 if hasattr(sys.modules[__name__], '__file__'):
2063 if hasattr(sys.modules[__name__], '__file__'):
2064 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
2064 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
2065 elif getattr(sys, 'oxidized', False):
2065 elif getattr(sys, 'oxidized', False):
2066 hgmodules = pycompat.sysexecutable
2066 hgmodules = pycompat.sysexecutable
2067
2067
2068 fm.write(
2068 fm.write(
2069 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
2069 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
2070 )
2070 )
2071 fm.write(
2071 fm.write(
2072 b'hgmodules',
2072 b'hgmodules',
2073 _(b"checking installed modules (%s)...\n"),
2073 _(b"checking installed modules (%s)...\n"),
2074 hgmodules or _(b"unknown"),
2074 hgmodules or _(b"unknown"),
2075 )
2075 )
2076
2076
2077 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
2077 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
2078 rustext = rustandc # for now, that's the only case
2078 rustext = rustandc # for now, that's the only case
2079 cext = policy.policy in (b'c', b'allow') or rustandc
2079 cext = policy.policy in (b'c', b'allow') or rustandc
2080 nopure = cext or rustext
2080 nopure = cext or rustext
2081 if nopure:
2081 if nopure:
2082 err = None
2082 err = None
2083 try:
2083 try:
2084 if cext:
2084 if cext:
2085 from .cext import ( # pytype: disable=import-error
2085 from .cext import ( # pytype: disable=import-error
2086 base85,
2086 base85,
2087 bdiff,
2087 bdiff,
2088 mpatch,
2088 mpatch,
2089 osutil,
2089 osutil,
2090 )
2090 )
2091
2091
2092 # quiet pyflakes
2092 # quiet pyflakes
2093 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
2093 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
2094 if rustext:
2094 if rustext:
2095 from .rustext import ( # pytype: disable=import-error
2095 from .rustext import ( # pytype: disable=import-error
2096 ancestor,
2096 ancestor,
2097 dirstate,
2097 dirstate,
2098 )
2098 )
2099
2099
2100 dir(ancestor), dir(dirstate) # quiet pyflakes
2100 dir(ancestor), dir(dirstate) # quiet pyflakes
2101 except Exception as inst:
2101 except Exception as inst:
2102 err = stringutil.forcebytestr(inst)
2102 err = stringutil.forcebytestr(inst)
2103 problems += 1
2103 problems += 1
2104 fm.condwrite(err, b'extensionserror', b" %s\n", err)
2104 fm.condwrite(err, b'extensionserror', b" %s\n", err)
2105
2105
2106 compengines = util.compengines._engines.values()
2106 compengines = util.compengines._engines.values()
2107 fm.write(
2107 fm.write(
2108 b'compengines',
2108 b'compengines',
2109 _(b'checking registered compression engines (%s)\n'),
2109 _(b'checking registered compression engines (%s)\n'),
2110 fm.formatlist(
2110 fm.formatlist(
2111 sorted(e.name() for e in compengines),
2111 sorted(e.name() for e in compengines),
2112 name=b'compengine',
2112 name=b'compengine',
2113 fmt=b'%s',
2113 fmt=b'%s',
2114 sep=b', ',
2114 sep=b', ',
2115 ),
2115 ),
2116 )
2116 )
2117 fm.write(
2117 fm.write(
2118 b'compenginesavail',
2118 b'compenginesavail',
2119 _(b'checking available compression engines (%s)\n'),
2119 _(b'checking available compression engines (%s)\n'),
2120 fm.formatlist(
2120 fm.formatlist(
2121 sorted(e.name() for e in compengines if e.available()),
2121 sorted(e.name() for e in compengines if e.available()),
2122 name=b'compengine',
2122 name=b'compengine',
2123 fmt=b'%s',
2123 fmt=b'%s',
2124 sep=b', ',
2124 sep=b', ',
2125 ),
2125 ),
2126 )
2126 )
2127 wirecompengines = compression.compengines.supportedwireengines(
2127 wirecompengines = compression.compengines.supportedwireengines(
2128 compression.SERVERROLE
2128 compression.SERVERROLE
2129 )
2129 )
2130 fm.write(
2130 fm.write(
2131 b'compenginesserver',
2131 b'compenginesserver',
2132 _(
2132 _(
2133 b'checking available compression engines '
2133 b'checking available compression engines '
2134 b'for wire protocol (%s)\n'
2134 b'for wire protocol (%s)\n'
2135 ),
2135 ),
2136 fm.formatlist(
2136 fm.formatlist(
2137 [e.name() for e in wirecompengines if e.wireprotosupport()],
2137 [e.name() for e in wirecompengines if e.wireprotosupport()],
2138 name=b'compengine',
2138 name=b'compengine',
2139 fmt=b'%s',
2139 fmt=b'%s',
2140 sep=b', ',
2140 sep=b', ',
2141 ),
2141 ),
2142 )
2142 )
2143 re2 = b'missing'
2143 re2 = b'missing'
2144 if util.has_re2():
2144 if util.has_re2():
2145 re2 = b'available'
2145 re2 = b'available'
2146 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
2146 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
2147 fm.data(re2=bool(util._re2))
2147 fm.data(re2=bool(util._re2))
2148
2148
2149 # templates
2149 # templates
2150 p = templater.templatedir()
2150 p = templater.templatedir()
2151 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
2151 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
2152 fm.condwrite(not p, b'', _(b" no template directories found\n"))
2152 fm.condwrite(not p, b'', _(b" no template directories found\n"))
2153 if p:
2153 if p:
2154 (m, fp) = templater.try_open_template(b"map-cmdline.default")
2154 (m, fp) = templater.try_open_template(b"map-cmdline.default")
2155 if m:
2155 if m:
2156 # template found, check if it is working
2156 # template found, check if it is working
2157 err = None
2157 err = None
2158 try:
2158 try:
2159 templater.templater.frommapfile(m)
2159 templater.templater.frommapfile(m)
2160 except Exception as inst:
2160 except Exception as inst:
2161 err = stringutil.forcebytestr(inst)
2161 err = stringutil.forcebytestr(inst)
2162 p = None
2162 p = None
2163 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
2163 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
2164 else:
2164 else:
2165 p = None
2165 p = None
2166 fm.condwrite(
2166 fm.condwrite(
2167 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
2167 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
2168 )
2168 )
2169 fm.condwrite(
2169 fm.condwrite(
2170 not m,
2170 not m,
2171 b'defaulttemplatenotfound',
2171 b'defaulttemplatenotfound',
2172 _(b" template '%s' not found\n"),
2172 _(b" template '%s' not found\n"),
2173 b"default",
2173 b"default",
2174 )
2174 )
2175 if not p:
2175 if not p:
2176 problems += 1
2176 problems += 1
2177 fm.condwrite(
2177 fm.condwrite(
2178 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
2178 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
2179 )
2179 )
2180
2180
2181 # editor
2181 # editor
2182 editor = ui.geteditor()
2182 editor = ui.geteditor()
2183 editor = util.expandpath(editor)
2183 editor = util.expandpath(editor)
2184 editorbin = procutil.shellsplit(editor)[0]
2184 editorbin = procutil.shellsplit(editor)[0]
2185 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
2185 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
2186 cmdpath = procutil.findexe(editorbin)
2186 cmdpath = procutil.findexe(editorbin)
2187 fm.condwrite(
2187 fm.condwrite(
2188 not cmdpath and editor == b'vi',
2188 not cmdpath and editor == b'vi',
2189 b'vinotfound',
2189 b'vinotfound',
2190 _(
2190 _(
2191 b" No commit editor set and can't find %s in PATH\n"
2191 b" No commit editor set and can't find %s in PATH\n"
2192 b" (specify a commit editor in your configuration"
2192 b" (specify a commit editor in your configuration"
2193 b" file)\n"
2193 b" file)\n"
2194 ),
2194 ),
2195 not cmdpath and editor == b'vi' and editorbin,
2195 not cmdpath and editor == b'vi' and editorbin,
2196 )
2196 )
2197 fm.condwrite(
2197 fm.condwrite(
2198 not cmdpath and editor != b'vi',
2198 not cmdpath and editor != b'vi',
2199 b'editornotfound',
2199 b'editornotfound',
2200 _(
2200 _(
2201 b" Can't find editor '%s' in PATH\n"
2201 b" Can't find editor '%s' in PATH\n"
2202 b" (specify a commit editor in your configuration"
2202 b" (specify a commit editor in your configuration"
2203 b" file)\n"
2203 b" file)\n"
2204 ),
2204 ),
2205 not cmdpath and editorbin,
2205 not cmdpath and editorbin,
2206 )
2206 )
2207 if not cmdpath and editor != b'vi':
2207 if not cmdpath and editor != b'vi':
2208 problems += 1
2208 problems += 1
2209
2209
2210 # check username
2210 # check username
2211 username = None
2211 username = None
2212 err = None
2212 err = None
2213 try:
2213 try:
2214 username = ui.username()
2214 username = ui.username()
2215 except error.Abort as e:
2215 except error.Abort as e:
2216 err = e.message
2216 err = e.message
2217 problems += 1
2217 problems += 1
2218
2218
2219 fm.condwrite(
2219 fm.condwrite(
2220 username, b'username', _(b"checking username (%s)\n"), username
2220 username, b'username', _(b"checking username (%s)\n"), username
2221 )
2221 )
2222 fm.condwrite(
2222 fm.condwrite(
2223 err,
2223 err,
2224 b'usernameerror',
2224 b'usernameerror',
2225 _(
2225 _(
2226 b"checking username...\n %s\n"
2226 b"checking username...\n %s\n"
2227 b" (specify a username in your configuration file)\n"
2227 b" (specify a username in your configuration file)\n"
2228 ),
2228 ),
2229 err,
2229 err,
2230 )
2230 )
2231
2231
2232 for name, mod in extensions.extensions():
2232 for name, mod in extensions.extensions():
2233 handler = getattr(mod, 'debuginstall', None)
2233 handler = getattr(mod, 'debuginstall', None)
2234 if handler is not None:
2234 if handler is not None:
2235 problems += handler(ui, fm)
2235 problems += handler(ui, fm)
2236
2236
2237 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2237 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2238 if not problems:
2238 if not problems:
2239 fm.data(problems=problems)
2239 fm.data(problems=problems)
2240 fm.condwrite(
2240 fm.condwrite(
2241 problems,
2241 problems,
2242 b'problems',
2242 b'problems',
2243 _(b"%d problems detected, please check your install!\n"),
2243 _(b"%d problems detected, please check your install!\n"),
2244 problems,
2244 problems,
2245 )
2245 )
2246 fm.end()
2246 fm.end()
2247
2247
2248 return problems
2248 return problems
2249
2249
2250
2250
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    # Open the target as a peer so this also works against remote repos.
    peer = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
    if not peer.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    nodes = [bin(hexid) for hexid in ids]
    # One output character per queried node, in input order.
    bits = [b"1" if known else b"0" for known in peer.known(nodes)]
    ui.write(b"%s\n" % b"".join(bits))
2263
2263
2264
2264
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Kept only so ancient shell-completion scripts keep working; the real
    # implementation lives in debugnamecomplete.
    return debugnamecomplete(ui, repo, *args)
2269
2269
2270
2270
@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # Force-free mode: just delete the lock files outright, bypassing any
    # ownership checks (hence "DANGEROUS" in the option help).
    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        # Set mode: acquire the requested lock(s) non-blockingly (wlock
        # first, matching Mercurial's usual acquisition order) and hold them
        # until the user lets go.
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            try:
                if ui.interactive():
                    # Interactive session: a simple prompt is the signal.
                    prompt = _(b"ready to release the lock (y)? $$ &Yes")
                    ui.promptchoice(prompt)
                else:
                    # Non-interactive: spin until interrupted by a signal.
                    msg = b"%d locks held, waiting for signal\n"
                    msg %= len(locks)
                    ui.status(msg)
                    while True:  # XXX wait for a signal
                        time.sleep(0.1)
            except KeyboardInterrupt:
                msg = b"signal-received releasing locks\n"
                ui.status(msg)
            return 0
    finally:
        # Always release whatever we managed to acquire.
        release(*locks)

    # Report mode (no options): probe each lock and describe its holder.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        """Probe one lock; print its status and return 1 if held, else 0."""
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We acquired it, so nobody else held it; give it right back.
            l.release()
        else:
            # Somebody holds it: describe the holder from the lock file.
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    # Only mention the host when the lock is remote.
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except FileNotFoundError:
                # Lock file vanished between the probe and the stat: treat
                # it as free.
                pass

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
2393
2393
2394
2394
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def _cache():
        """Return the manifest fulltext cache, aborting if unavailable."""
        storage = repo.manifestlog.getstorage(b'')
        try:
            return storage._fulltextcache
        except AttributeError:
            raise error.Abort(
                _(
                    b"Current revlog implementation doesn't appear to have a "
                    b"manifest fulltext cache\n"
                )
            )

    # --clear: drop both the in-memory and the persisted cache contents.
    if opts.get('clear'):
        with repo.wlock():
            _cache().clear(clear_persisted_data=True)
        return

    # --add NODE...: read each manifest, which populates the cache as a
    # side effect.
    if add:
        with repo.wlock():
            manifestlog = repo.manifestlog
            store = manifestlog.getstorage(b'')
            for node in add:
                try:
                    entry = manifestlog[store.lookup(node)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                entry.read()  # stores revision in cache too
        return

    # No options: dump the cache contents, most recent first.
    cache = _cache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
        return
    ui.write(
        _(
            b'cache contains %d manifest entries, in order of most to '
            b'least recent:\n'
        )
        % (len(cache),)
    )
    totalsize = 0
    for nodeid in cache:
        # peek() does not refresh the LRU ordering while we iterate.
        entry_size = len(cache.peek(nodeid))
        totalsize += entry_size + 24  # 20 bytes nodeid, 4 bytes size
        ui.write(
            _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(entry_size))
        )
    ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
    ui.write(
        _(b'total cache data size %s, on-disk %s\n')
        % (util.bytecount(totalsize), util.bytecount(ondisk))
    )
2468
2468
2469
2469
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    # In verbose mode, report which on-disk merge-state format (v1 or v2)
    # will be used, mirroring the selection logic of mergestate.read().
    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    # Default template for the plain-text rendering; users may override it
    # via the standard --template option.
    if not opts['template']:
        opts['template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', pycompat.byteskwargs(opts))
    fm.startitem()

    # First nested section: the two commits being merged (local/other).
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    # Second nested section: per-file merge records.
    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            # The record layout depends on the record type: content merges
            # carry full local/ancestor/other info, path conflicts only a
            # rename description.
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    # Final nested section: extras for files that have no merge record of
    # their own (those with records were handled in the loop above).
    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2576
2576
2577
2577
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    # Collect names from every namespace except branches; branches get
    # special handling below because only open ones should be offered.
    candidates = set()
    for ns_name, ns in repo.names.items():
        if ns_name == b'branches':
            continue
        candidates.update(ns.listnames(repo))
    for branch, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(branch)

    # An empty argument list means "complete everything".
    prefixes = args or (b'',)
    matches = set()
    for prefix in prefixes:
        matches.update(c for c in candidates if c.startswith(prefix))
    ui.write(b'\n'.join(sorted(matches)))
    ui.write(b'\n')
2600
2600
2601
2601
@command(
    b'debugnodemap',
    (
        cmdutil.debugrevlogopts
        + [
            (
                b'',
                b'dump-new',
                False,
                _(b'write a (new) persistent binary nodemap on stdout'),
            ),
            (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
            (
                b'',
                b'check',
                False,
                _(b'check that the data on disk data are correct.'),
            ),
            (
                b'',
                b'metadata',
                False,
                _(b'display the on disk meta data for the nodemap'),
            ),
        ]
    ),
    _(b'-c|-m|FILE'),
)
def debugnodemap(ui, repo, file_=None, **opts):
    """write and inspect on disk nodemap"""
    explicit_target = (
        opts.get('changelog') or opts.get('manifest') or opts.get('dir')
    )
    if explicit_target:
        if file_ is not None:
            raise error.InputError(
                _(b'cannot specify a file with other arguments')
            )
    elif file_ is None:
        # Nothing selected at all: default to the changelog.
        opts['changelog'] = True
    rl = cmdutil.openstorage(
        repo.unfiltered(), b'debugnodemap', file_, pycompat.byteskwargs(opts)
    )
    # Manifest/filelog storages wrap a revlog; unwrap to reach the index.
    if isinstance(rl, (manifest.manifestrevlog, filelog.filelog)):
        rl = rl._revlog
    if opts['dump_new']:
        # Prefer the native (Rust) index serialization when available.
        if hasattr(rl.index, "nodemap_data_all"):
            payload = rl.index.nodemap_data_all()
        else:
            payload = nodemap.persistent_data(rl.index)
        ui.write(payload)
    elif opts['dump_disk']:
        on_disk = nodemap.persisted_data(rl)
        if on_disk is not None:
            docket, payload = on_disk
            ui.write(payload[:])
    elif opts['check']:
        on_disk = nodemap.persisted_data(rl)
        if on_disk is not None:
            docket, payload = on_disk
            return nodemap.check_data(ui, rl.index, payload)
    elif opts['metadata']:
        on_disk = nodemap.persisted_data(rl)
        if on_disk is not None:
            docket, payload = on_disk
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2671
2671
2672
2672
2673 @command(
2673 @command(
2674 b'debugobsolete',
2674 b'debugobsolete',
2675 [
2675 [
2676 (b'', b'flags', 0, _(b'markers flag')),
2676 (b'', b'flags', 0, _(b'markers flag')),
2677 (
2677 (
2678 b'',
2678 b'',
2679 b'record-parents',
2679 b'record-parents',
2680 False,
2680 False,
2681 _(b'record parent information for the precursor'),
2681 _(b'record parent information for the precursor'),
2682 ),
2682 ),
2683 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2683 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2684 (
2684 (
2685 b'',
2685 b'',
2686 b'exclusive',
2686 b'exclusive',
2687 False,
2687 False,
2688 _(b'restrict display to markers only relevant to REV'),
2688 _(b'restrict display to markers only relevant to REV'),
2689 ),
2689 ),
2690 (b'', b'index', False, _(b'display index of the marker')),
2690 (b'', b'index', False, _(b'display index of the marker')),
2691 (b'', b'delete', [], _(b'delete markers specified by indices')),
2691 (b'', b'delete', [], _(b'delete markers specified by indices')),
2692 ]
2692 ]
2693 + cmdutil.commitopts2
2693 + cmdutil.commitopts2
2694 + cmdutil.formatteropts,
2694 + cmdutil.formatteropts,
2695 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2695 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2696 )
2696 )
2697 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2697 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2698 """create arbitrary obsolete marker
2698 """create arbitrary obsolete marker
2699
2699
2700 With no arguments, displays the list of obsolescence markers."""
2700 With no arguments, displays the list of obsolescence markers."""
2701
2701
2702 def parsenodeid(s):
2702 def parsenodeid(s):
2703 try:
2703 try:
2704 # We do not use revsingle/revrange functions here to accept
2704 # We do not use revsingle/revrange functions here to accept
2705 # arbitrary node identifiers, possibly not present in the
2705 # arbitrary node identifiers, possibly not present in the
2706 # local repository.
2706 # local repository.
2707 n = bin(s)
2707 n = bin(s)
2708 if len(n) != repo.nodeconstants.nodelen:
2708 if len(n) != repo.nodeconstants.nodelen:
2709 raise ValueError
2709 raise ValueError
2710 return n
2710 return n
2711 except ValueError:
2711 except ValueError:
2712 raise error.InputError(
2712 raise error.InputError(
2713 b'changeset references must be full hexadecimal '
2713 b'changeset references must be full hexadecimal '
2714 b'node identifiers'
2714 b'node identifiers'
2715 )
2715 )
2716
2716
2717 if opts.get('delete'):
2717 if opts.get('delete'):
2718 indices = []
2718 indices = []
2719 for v in opts.get('delete'):
2719 for v in opts.get('delete'):
2720 try:
2720 try:
2721 indices.append(int(v))
2721 indices.append(int(v))
2722 except ValueError:
2722 except ValueError:
2723 raise error.InputError(
2723 raise error.InputError(
2724 _(b'invalid index value: %r') % v,
2724 _(b'invalid index value: %r') % v,
2725 hint=_(b'use integers for indices'),
2725 hint=_(b'use integers for indices'),
2726 )
2726 )
2727
2727
2728 if repo.currenttransaction():
2728 if repo.currenttransaction():
2729 raise error.Abort(
2729 raise error.Abort(
2730 _(b'cannot delete obsmarkers in the middle of transaction.')
2730 _(b'cannot delete obsmarkers in the middle of transaction.')
2731 )
2731 )
2732
2732
2733 with repo.lock():
2733 with repo.lock():
2734 n = repair.deleteobsmarkers(repo.obsstore, indices)
2734 n = repair.deleteobsmarkers(repo.obsstore, indices)
2735 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2735 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2736
2736
2737 return
2737 return
2738
2738
2739 if precursor is not None:
2739 if precursor is not None:
2740 if opts['rev']:
2740 if opts['rev']:
2741 raise error.InputError(
2741 raise error.InputError(
2742 b'cannot select revision when creating marker'
2742 b'cannot select revision when creating marker'
2743 )
2743 )
2744 metadata = {}
2744 metadata = {}
2745 metadata[b'user'] = encoding.fromlocal(opts['user'] or ui.username())
2745 metadata[b'user'] = encoding.fromlocal(opts['user'] or ui.username())
2746 succs = tuple(parsenodeid(succ) for succ in successors)
2746 succs = tuple(parsenodeid(succ) for succ in successors)
2747 l = repo.lock()
2747 l = repo.lock()
2748 try:
2748 try:
2749 tr = repo.transaction(b'debugobsolete')
2749 tr = repo.transaction(b'debugobsolete')
2750 try:
2750 try:
2751 date = opts.get('date')
2751 date = opts.get('date')
2752 if date:
2752 if date:
2753 date = dateutil.parsedate(date)
2753 date = dateutil.parsedate(date)
2754 else:
2754 else:
2755 date = None
2755 date = None
2756 prec = parsenodeid(precursor)
2756 prec = parsenodeid(precursor)
2757 parents = None
2757 parents = None
2758 if opts['record_parents']:
2758 if opts['record_parents']:
2759 if prec not in repo.unfiltered():
2759 if prec not in repo.unfiltered():
2760 raise error.Abort(
2760 raise error.Abort(
2761 b'cannot used --record-parents on '
2761 b'cannot used --record-parents on '
2762 b'unknown changesets'
2762 b'unknown changesets'
2763 )
2763 )
2764 parents = repo.unfiltered()[prec].parents()
2764 parents = repo.unfiltered()[prec].parents()
2765 parents = tuple(p.node() for p in parents)
2765 parents = tuple(p.node() for p in parents)
2766 repo.obsstore.create(
2766 repo.obsstore.create(
2767 tr,
2767 tr,
2768 prec,
2768 prec,
2769 succs,
2769 succs,
2770 opts['flags'],
2770 opts['flags'],
2771 parents=parents,
2771 parents=parents,
2772 date=date,
2772 date=date,
2773 metadata=metadata,
2773 metadata=metadata,
2774 ui=ui,
2774 ui=ui,
2775 )
2775 )
2776 tr.close()
2776 tr.close()
2777 except ValueError as exc:
2777 except ValueError as exc:
2778 raise error.Abort(
2778 raise error.Abort(
2779 _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
2779 _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
2780 )
2780 )
2781 finally:
2781 finally:
2782 tr.release()
2782 tr.release()
2783 finally:
2783 finally:
2784 l.release()
2784 l.release()
2785 else:
2785 else:
2786 if opts['rev']:
2786 if opts['rev']:
2787 revs = logcmdutil.revrange(repo, opts['rev'])
2787 revs = logcmdutil.revrange(repo, opts['rev'])
2788 nodes = [repo[r].node() for r in revs]
2788 nodes = [repo[r].node() for r in revs]
2789 markers = list(
2789 markers = list(
2790 obsutil.getmarkers(
2790 obsutil.getmarkers(
2791 repo, nodes=nodes, exclusive=opts['exclusive']
2791 repo, nodes=nodes, exclusive=opts['exclusive']
2792 )
2792 )
2793 )
2793 )
2794 markers.sort(key=lambda x: x._data)
2794 markers.sort(key=lambda x: x._data)
2795 else:
2795 else:
2796 markers = obsutil.getmarkers(repo)
2796 markers = obsutil.getmarkers(repo)
2797
2797
2798 markerstoiter = markers
2798 markerstoiter = markers
2799 isrelevant = lambda m: True
2799 isrelevant = lambda m: True
2800 if opts.get('rev') and opts.get('index'):
2800 if opts.get('rev') and opts.get('index'):
2801 markerstoiter = obsutil.getmarkers(repo)
2801 markerstoiter = obsutil.getmarkers(repo)
2802 markerset = set(markers)
2802 markerset = set(markers)
2803 isrelevant = lambda m: m in markerset
2803 isrelevant = lambda m: m in markerset
2804
2804
2805 fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
2805 fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
2806 for i, m in enumerate(markerstoiter):
2806 for i, m in enumerate(markerstoiter):
2807 if not isrelevant(m):
2807 if not isrelevant(m):
2808 # marker can be irrelevant when we're iterating over a set
2808 # marker can be irrelevant when we're iterating over a set
2809 # of markers (markerstoiter) which is bigger than the set
2809 # of markers (markerstoiter) which is bigger than the set
2810 # of markers we want to display (markers)
2810 # of markers we want to display (markers)
2811 # this can happen if both --index and --rev options are
2811 # this can happen if both --index and --rev options are
2812 # provided and thus we need to iterate over all of the markers
2812 # provided and thus we need to iterate over all of the markers
2813 # to get the correct indices, but only display the ones that
2813 # to get the correct indices, but only display the ones that
2814 # are relevant to --rev value
2814 # are relevant to --rev value
2815 continue
2815 continue
2816 fm.startitem()
2816 fm.startitem()
2817 ind = i if opts.get('index') else None
2817 ind = i if opts.get('index') else None
2818 cmdutil.showmarker(fm, m, index=ind)
2818 cmdutil.showmarker(fm, m, index=ind)
2819 fm.end()
2819 fm.end()
2820
2820
2821
2821
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    # Resolve the requested revision; with no --rev this falls back to the
    # revsingle default behavior (default=None).
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    copy_map = ctx.p1copies()
    for destination, source in copy_map.items():
        ui.write(b'%s -> %s\n' % (source, destination))
2833
2833
2834
2834
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    # Resolve the requested revision; with no --rev this falls back to the
    # revsingle default behavior (default=None).
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    copy_map = ctx.p2copies()
    for destination, source in copy_map.items():
        ui.write(b'%s -> %s\n' % (source, destination))
2846
2846
2847
2847
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        # Translate `path` (possibly relative to cwd) into a spec relative
        # to the repository root, then collect the dirstate entries that
        # extend it and whose state is in `acceptable`.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            # The path points outside the repository: nothing to complete.
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        # Dirstate paths always use '/'; translate the spec when the OS
        # separator differs (e.g. on Windows).
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        # Bind the bound methods once: this loop runs over the whole
        # dirstate, which can be large.
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    # Report results using the native OS separator.
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    # Without --full, only complete up to the next path
                    # segment boundary.
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate states from the filter options;
    # when no filter option is given, `acceptable or b'nmar'` below selects
    # all states.
    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    # Directories and files are printed together, sorted, one per line.
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2916
2916
2917
2917
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    src_ctx = scmutil.revsingle(repo, rev1)
    dst_ctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(src_ctx, pats, opts)
    # Compute the copy map once, then print it in sorted order.
    copy_map = copies.pathcopies(src_ctx, dst_ctx, matcher)
    for destination, source in sorted(copy_map.items()):
        ui.write(b'%s -> %s\n' % (source, destination))
2931
2931
2932
2932
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging; the log lines only become
    # visible when --debug is in effect.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

    try:
        is_local = peer.local() is not None
        pushable = peer.canpush()

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (_(b'yes') if is_local else _(b'no')))
        ui.write(
            _(b'pushable: %s\n') % (_(b'yes') if pushable else _(b'no'))
        )
    finally:
        # Always release the peer connection, even if probing it failed.
        peer.close()
2956
2956
2957
2957
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    # --tool is applied by temporarily forcing ui.forcemerge, mirroring
    # how the real merge machinery consumes it.
    overrides = {}
    if opts['tool']:
        overrides[(b'ui', b'forcemerge')] = opts['tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, pycompat.byteskwargs(opts))
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            # Unless --debug is set, silence the (possibly verbose) output
            # produced while the merge tool is being picked; only the final
            # "FILE = MERGETOOL" line is of interest.
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))
3041
3041
3042
3042
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if not keyinfo:
            # Listing mode: dump every key/value pair in the namespace.
            for key, value in sorted(target.listkeys(namespace).items()):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(key), stringutil.escapestr(value))
                )
        else:
            # Update mode: attempt the conditional old -> new transition.
            key, old, new = keyinfo
            with target.commandexecutor() as executor:
                result = executor.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(result) + b'\n')
            # Exit status 0 on success (truthy pushkey result).
            return not result
    finally:
        target.close()
3078
3078
3079
3079
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    """compare the parent vectors (pvec) of two revisions

    Prints both vectors, their depths, and the computed relation
    between them (=, >, <, or | for concurrent).
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    else:
        # Previously `rel` was left unassigned when none of the relations
        # held, which would raise UnboundLocalError in the output below.
        # Report an explicit "unknown" relation instead of crashing.
        rel = b"?"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
3106
3106
3107
3107
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        # Rebuilding inside an open transaction would interact badly with
        # the transaction's dirstate bookkeeping, so forbid it outright.
        if repo.currenttransaction() is not None:
            msg = b'rebuild the dirstate outside of a transaction'
            raise error.ProgrammingError(msg)
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            # Restrict the rebuild to files that are inconsistent between
            # the dirstate and the target manifest:
            # - tracked in the manifest but missing from the dirstate, or
            # - present in the dirstate (and not marked added) but absent
            #   from the manifest.
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
            changedfiles = manifestonly | dsnotadded

        # changedfiles=None means "rebuild everything".
        with dirstate.changing_parents(repo):
            dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3159
3159
3160
3160
@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file"""
    only_data = opts.get("only_data")
    repair.rebuildfncache(ui, repo, only_data)
3176
3176
3177
3177
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, pats, pycompat.byteskwargs(opts))
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # `renamed` is (source path, source filenode) or a false value.
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(path)
        if not renamed:
            ui.write(_(b"%s not renamed\n") % relpath)
        else:
            ui.write(
                _(b"%s renamed from %s:%s\n")
                % (relpath, renamed[0], hex(renamed[1]))
            )
3196
3196
3197
3197
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    # One requirement per line, in sorted order.
    for requirement in sorted(repo.requirements):
        ui.write(b"%s\n" % requirement)
3203
3203
3204
3204
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    rlog = cmdutil.openrevlog(
        repo, b'debugrevlog', file_, pycompat.byteskwargs(opts)
    )

    # --dump emits the raw index data; otherwise print statistics.
    if not opts.get("dump"):
        revlog_debug.debug_revlog(ui, rlog)
    else:
        revlog_debug.dump(ui, rlog)
    return 0
3222
3222
3223
3223
3224 @command(
3224 @command(
3225 b'debugrevlogindex',
3225 b'debugrevlogindex',
3226 cmdutil.debugrevlogopts
3226 cmdutil.debugrevlogopts
3227 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3227 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3228 _(b'[-f FORMAT] -c|-m|FILE'),
3228 _(b'[-f FORMAT] -c|-m|FILE'),
3229 optionalrepo=True,
3229 optionalrepo=True,
3230 )
3230 )
3231 def debugrevlogindex(ui, repo, file_=None, **opts):
3231 def debugrevlogindex(ui, repo, file_=None, **opts):
3232 """dump the contents of a revlog index"""
3232 """dump the contents of a revlog index"""
3233 r = cmdutil.openrevlog(
3233 r = cmdutil.openrevlog(
3234 repo, b'debugrevlogindex', file_, pycompat.byteskwargs(opts)
3234 repo, b'debugrevlogindex', file_, pycompat.byteskwargs(opts)
3235 )
3235 )
3236 format = opts.get('format', 0)
3236 format = opts.get('format', 0)
3237 if format not in (0, 1):
3237 if format not in (0, 1):
3238 raise error.Abort(_(b"unknown format %d") % format)
3238 raise error.Abort(_(b"unknown format %d") % format)
3239
3239
3240 if ui.debugflag:
3240 if ui.debugflag:
3241 shortfn = hex
3241 shortfn = hex
3242 else:
3242 else:
3243 shortfn = short
3243 shortfn = short
3244
3244
3245 # There might not be anything in r, so have a sane default
3245 # There might not be anything in r, so have a sane default
3246 idlen = 12
3246 idlen = 12
3247 for i in r:
3247 for i in r:
3248 idlen = len(shortfn(r.node(i)))
3248 idlen = len(shortfn(r.node(i)))
3249 break
3249 break
3250
3250
3251 if format == 0:
3251 if format == 0:
3252 if ui.verbose:
3252 if ui.verbose:
3253 ui.writenoi18n(
3253 ui.writenoi18n(
3254 b" rev offset length linkrev %s %s p2\n"
3254 b" rev offset length linkrev %s %s p2\n"
3255 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3255 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3256 )
3256 )
3257 else:
3257 else:
3258 ui.writenoi18n(
3258 ui.writenoi18n(
3259 b" rev linkrev %s %s p2\n"
3259 b" rev linkrev %s %s p2\n"
3260 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3260 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3261 )
3261 )
3262 elif format == 1:
3262 elif format == 1:
3263 if ui.verbose:
3263 if ui.verbose:
3264 ui.writenoi18n(
3264 ui.writenoi18n(
3265 (
3265 (
3266 b" rev flag offset length size link p1"
3266 b" rev flag offset length size link p1"
3267 b" p2 %s\n"
3267 b" p2 %s\n"
3268 )
3268 )
3269 % b"nodeid".rjust(idlen)
3269 % b"nodeid".rjust(idlen)
3270 )
3270 )
3271 else:
3271 else:
3272 ui.writenoi18n(
3272 ui.writenoi18n(
3273 b" rev flag size link p1 p2 %s\n"
3273 b" rev flag size link p1 p2 %s\n"
3274 % b"nodeid".rjust(idlen)
3274 % b"nodeid".rjust(idlen)
3275 )
3275 )
3276
3276
3277 for i in r:
3277 for i in r:
3278 node = r.node(i)
3278 node = r.node(i)
3279 if format == 0:
3279 if format == 0:
3280 try:
3280 try:
3281 pp = r.parents(node)
3281 pp = r.parents(node)
3282 except Exception:
3282 except Exception:
3283 pp = [repo.nullid, repo.nullid]
3283 pp = [repo.nullid, repo.nullid]
3284 if ui.verbose:
3284 if ui.verbose:
3285 ui.write(
3285 ui.write(
3286 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3286 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3287 % (
3287 % (
3288 i,
3288 i,
3289 r.start(i),
3289 r.start(i),
3290 r.length(i),
3290 r.length(i),
3291 r.linkrev(i),
3291 r.linkrev(i),
3292 shortfn(node),
3292 shortfn(node),
3293 shortfn(pp[0]),
3293 shortfn(pp[0]),
3294 shortfn(pp[1]),
3294 shortfn(pp[1]),
3295 )
3295 )
3296 )
3296 )
3297 else:
3297 else:
3298 ui.write(
3298 ui.write(
3299 b"% 6d % 7d %s %s %s\n"
3299 b"% 6d % 7d %s %s %s\n"
3300 % (
3300 % (
3301 i,
3301 i,
3302 r.linkrev(i),
3302 r.linkrev(i),
3303 shortfn(node),
3303 shortfn(node),
3304 shortfn(pp[0]),
3304 shortfn(pp[0]),
3305 shortfn(pp[1]),
3305 shortfn(pp[1]),
3306 )
3306 )
3307 )
3307 )
3308 elif format == 1:
3308 elif format == 1:
3309 pr = r.parentrevs(i)
3309 pr = r.parentrevs(i)
3310 if ui.verbose:
3310 if ui.verbose:
3311 ui.write(
3311 ui.write(
3312 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3312 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3313 % (
3313 % (
3314 i,
3314 i,
3315 r.flags(i),
3315 r.flags(i),
3316 r.start(i),
3316 r.start(i),
3317 r.length(i),
3317 r.length(i),
3318 r.rawsize(i),
3318 r.rawsize(i),
3319 r.linkrev(i),
3319 r.linkrev(i),
3320 pr[0],
3320 pr[0],
3321 pr[1],
3321 pr[1],
3322 shortfn(node),
3322 shortfn(node),
3323 )
3323 )
3324 )
3324 )
3325 else:
3325 else:
3326 ui.write(
3326 ui.write(
3327 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3327 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3328 % (
3328 % (
3329 i,
3329 i,
3330 r.flags(i),
3330 r.flags(i),
3331 r.rawsize(i),
3331 r.rawsize(i),
3332 r.linkrev(i),
3332 r.linkrev(i),
3333 pr[0],
3333 pr[0],
3334 pr[1],
3334 pr[1],
3335 shortfn(node),
3335 shortfn(node),
3336 )
3336 )
3337 )
3337 )
3338
3338
3339
3339
3340 @command(
3340 @command(
3341 b'debugrevspec',
3341 b'debugrevspec',
3342 [
3342 [
3343 (
3343 (
3344 b'',
3344 b'',
3345 b'optimize',
3345 b'optimize',
3346 None,
3346 None,
3347 _(b'print parsed tree after optimizing (DEPRECATED)'),
3347 _(b'print parsed tree after optimizing (DEPRECATED)'),
3348 ),
3348 ),
3349 (
3349 (
3350 b'',
3350 b'',
3351 b'show-revs',
3351 b'show-revs',
3352 True,
3352 True,
3353 _(b'print list of result revisions (default)'),
3353 _(b'print list of result revisions (default)'),
3354 ),
3354 ),
3355 (
3355 (
3356 b's',
3356 b's',
3357 b'show-set',
3357 b'show-set',
3358 None,
3358 None,
3359 _(b'print internal representation of result set'),
3359 _(b'print internal representation of result set'),
3360 ),
3360 ),
3361 (
3361 (
3362 b'p',
3362 b'p',
3363 b'show-stage',
3363 b'show-stage',
3364 [],
3364 [],
3365 _(b'print parsed tree at the given stage'),
3365 _(b'print parsed tree at the given stage'),
3366 _(b'NAME'),
3366 _(b'NAME'),
3367 ),
3367 ),
3368 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3368 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3369 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3369 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3370 ],
3370 ],
3371 b'REVSPEC',
3371 b'REVSPEC',
3372 )
3372 )
3373 def debugrevspec(ui, repo, expr, **opts):
3373 def debugrevspec(ui, repo, expr, **opts):
3374 """parse and apply a revision specification
3374 """parse and apply a revision specification
3375
3375
3376 Use -p/--show-stage option to print the parsed tree at the given stages.
3376 Use -p/--show-stage option to print the parsed tree at the given stages.
3377 Use -p all to print tree at every stage.
3377 Use -p all to print tree at every stage.
3378
3378
3379 Use --no-show-revs option with -s or -p to print only the set
3379 Use --no-show-revs option with -s or -p to print only the set
3380 representation or the parsed tree respectively.
3380 representation or the parsed tree respectively.
3381
3381
3382 Use --verify-optimized to compare the optimized result with the unoptimized
3382 Use --verify-optimized to compare the optimized result with the unoptimized
3383 one. Returns 1 if the optimized result differs.
3383 one. Returns 1 if the optimized result differs.
3384 """
3384 """
3385 aliases = ui.configitems(b'revsetalias')
3385 aliases = ui.configitems(b'revsetalias')
3386 stages = [
3386 stages = [
3387 (b'parsed', lambda tree: tree),
3387 (b'parsed', lambda tree: tree),
3388 (
3388 (
3389 b'expanded',
3389 b'expanded',
3390 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3390 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3391 ),
3391 ),
3392 (b'concatenated', revsetlang.foldconcat),
3392 (b'concatenated', revsetlang.foldconcat),
3393 (b'analyzed', revsetlang.analyze),
3393 (b'analyzed', revsetlang.analyze),
3394 (b'optimized', revsetlang.optimize),
3394 (b'optimized', revsetlang.optimize),
3395 ]
3395 ]
3396 if opts['no_optimized']:
3396 if opts['no_optimized']:
3397 stages = stages[:-1]
3397 stages = stages[:-1]
3398 if opts['verify_optimized'] and opts['no_optimized']:
3398 if opts['verify_optimized'] and opts['no_optimized']:
3399 raise error.Abort(
3399 raise error.Abort(
3400 _(b'cannot use --verify-optimized with --no-optimized')
3400 _(b'cannot use --verify-optimized with --no-optimized')
3401 )
3401 )
3402 stagenames = {n for n, f in stages}
3402 stagenames = {n for n, f in stages}
3403
3403
3404 showalways = set()
3404 showalways = set()
3405 showchanged = set()
3405 showchanged = set()
3406 if ui.verbose and not opts['show_stage']:
3406 if ui.verbose and not opts['show_stage']:
3407 # show parsed tree by --verbose (deprecated)
3407 # show parsed tree by --verbose (deprecated)
3408 showalways.add(b'parsed')
3408 showalways.add(b'parsed')
3409 showchanged.update([b'expanded', b'concatenated'])
3409 showchanged.update([b'expanded', b'concatenated'])
3410 if opts['optimize']:
3410 if opts['optimize']:
3411 showalways.add(b'optimized')
3411 showalways.add(b'optimized')
3412 if opts['show_stage'] and opts['optimize']:
3412 if opts['show_stage'] and opts['optimize']:
3413 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3413 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3414 if opts['show_stage'] == [b'all']:
3414 if opts['show_stage'] == [b'all']:
3415 showalways.update(stagenames)
3415 showalways.update(stagenames)
3416 else:
3416 else:
3417 for n in opts['show_stage']:
3417 for n in opts['show_stage']:
3418 if n not in stagenames:
3418 if n not in stagenames:
3419 raise error.Abort(_(b'invalid stage name: %s') % n)
3419 raise error.Abort(_(b'invalid stage name: %s') % n)
3420 showalways.update(opts['show_stage'])
3420 showalways.update(opts['show_stage'])
3421
3421
3422 treebystage = {}
3422 treebystage = {}
3423 printedtree = None
3423 printedtree = None
3424 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3424 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3425 for n, f in stages:
3425 for n, f in stages:
3426 treebystage[n] = tree = f(tree)
3426 treebystage[n] = tree = f(tree)
3427 if n in showalways or (n in showchanged and tree != printedtree):
3427 if n in showalways or (n in showchanged and tree != printedtree):
3428 if opts['show_stage'] or n != b'parsed':
3428 if opts['show_stage'] or n != b'parsed':
3429 ui.write(b"* %s:\n" % n)
3429 ui.write(b"* %s:\n" % n)
3430 ui.write(revsetlang.prettyformat(tree), b"\n")
3430 ui.write(revsetlang.prettyformat(tree), b"\n")
3431 printedtree = tree
3431 printedtree = tree
3432
3432
3433 if opts['verify_optimized']:
3433 if opts['verify_optimized']:
3434 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3434 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3435 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3435 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3436 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3436 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3437 ui.writenoi18n(
3437 ui.writenoi18n(
3438 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3438 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3439 )
3439 )
3440 ui.writenoi18n(
3440 ui.writenoi18n(
3441 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3441 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3442 )
3442 )
3443 arevs = list(arevs)
3443 arevs = list(arevs)
3444 brevs = list(brevs)
3444 brevs = list(brevs)
3445 if arevs == brevs:
3445 if arevs == brevs:
3446 return 0
3446 return 0
3447 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3447 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3448 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3448 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3449 sm = difflib.SequenceMatcher(None, arevs, brevs)
3449 sm = difflib.SequenceMatcher(None, arevs, brevs)
3450 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3450 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3451 if tag in ('delete', 'replace'):
3451 if tag in ('delete', 'replace'):
3452 for c in arevs[alo:ahi]:
3452 for c in arevs[alo:ahi]:
3453 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3453 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3454 if tag in ('insert', 'replace'):
3454 if tag in ('insert', 'replace'):
3455 for c in brevs[blo:bhi]:
3455 for c in brevs[blo:bhi]:
3456 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3456 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3457 if tag == 'equal':
3457 if tag == 'equal':
3458 for c in arevs[alo:ahi]:
3458 for c in arevs[alo:ahi]:
3459 ui.write(b' %d\n' % c)
3459 ui.write(b' %d\n' % c)
3460 return 1
3460 return 1
3461
3461
3462 func = revset.makematcher(tree)
3462 func = revset.makematcher(tree)
3463 revs = func(repo)
3463 revs = func(repo)
3464 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3464 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3465 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3465 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3466 if not opts['show_revs']:
3466 if not opts['show_revs']:
3467 return
3467 return
3468 for c in revs:
3468 for c in revs:
3469 ui.write(b"%d\n" % c)
3469 ui.write(b"%d\n" % c)
3470
3470
3471
3471
3472 @command(
3472 @command(
3473 b'debugserve',
3473 b'debugserve',
3474 [
3474 [
3475 (
3475 (
3476 b'',
3476 b'',
3477 b'sshstdio',
3477 b'sshstdio',
3478 False,
3478 False,
3479 _(b'run an SSH server bound to process handles'),
3479 _(b'run an SSH server bound to process handles'),
3480 ),
3480 ),
3481 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3481 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3482 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3482 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3483 ],
3483 ],
3484 b'',
3484 b'',
3485 )
3485 )
3486 def debugserve(ui, repo, **opts):
3486 def debugserve(ui, repo, **opts):
3487 """run a server with advanced settings
3487 """run a server with advanced settings
3488
3488
3489 This command is similar to :hg:`serve`. It exists partially as a
3489 This command is similar to :hg:`serve`. It exists partially as a
3490 workaround to the fact that ``hg serve --stdio`` must have specific
3490 workaround to the fact that ``hg serve --stdio`` must have specific
3491 arguments for security reasons.
3491 arguments for security reasons.
3492 """
3492 """
3493 if not opts['sshstdio']:
3493 if not opts['sshstdio']:
3494 raise error.Abort(_(b'only --sshstdio is currently supported'))
3494 raise error.Abort(_(b'only --sshstdio is currently supported'))
3495
3495
3496 logfh = None
3496 logfh = None
3497
3497
3498 if opts['logiofd'] and opts['logiofile']:
3498 if opts['logiofd'] and opts['logiofile']:
3499 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3499 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3500
3500
3501 if opts['logiofd']:
3501 if opts['logiofd']:
3502 # Ideally we would be line buffered. But line buffering in binary
3502 # Ideally we would be line buffered. But line buffering in binary
3503 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3503 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3504 # buffering could have performance impacts. But since this isn't
3504 # buffering could have performance impacts. But since this isn't
3505 # performance critical code, it should be fine.
3505 # performance critical code, it should be fine.
3506 try:
3506 try:
3507 logfh = os.fdopen(int(opts['logiofd']), 'ab', 0)
3507 logfh = os.fdopen(int(opts['logiofd']), 'ab', 0)
3508 except OSError as e:
3508 except OSError as e:
3509 if e.errno != errno.ESPIPE:
3509 if e.errno != errno.ESPIPE:
3510 raise
3510 raise
3511 # can't seek a pipe, so `ab` mode fails on py3
3511 # can't seek a pipe, so `ab` mode fails on py3
3512 logfh = os.fdopen(int(opts['logiofd']), 'wb', 0)
3512 logfh = os.fdopen(int(opts['logiofd']), 'wb', 0)
3513 elif opts['logiofile']:
3513 elif opts['logiofile']:
3514 logfh = open(opts['logiofile'], b'ab', 0)
3514 logfh = open(opts['logiofile'], b'ab', 0)
3515
3515
3516 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3516 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3517 s.serve_forever()
3517 s.serve_forever()
3518
3518
3519
3519
3520 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3520 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3521 def debugsetparents(ui, repo, rev1, rev2=None):
3521 def debugsetparents(ui, repo, rev1, rev2=None):
3522 """manually set the parents of the current working directory (DANGEROUS)
3522 """manually set the parents of the current working directory (DANGEROUS)
3523
3523
3524 This command is not what you are looking for and should not be used. Using
3524 This command is not what you are looking for and should not be used. Using
3525 this command will most certainly results in slight corruption of the file
3525 this command will most certainly results in slight corruption of the file
3526 level histories withing your repository. DO NOT USE THIS COMMAND.
3526 level histories withing your repository. DO NOT USE THIS COMMAND.
3527
3527
3528 The command update the p1 and p2 field in the dirstate, and not touching
3528 The command update the p1 and p2 field in the dirstate, and not touching
3529 anything else. This useful for writing repository conversion tools, but
3529 anything else. This useful for writing repository conversion tools, but
3530 should be used with extreme care. For example, neither the working
3530 should be used with extreme care. For example, neither the working
3531 directory nor the dirstate is updated, so file status may be incorrect
3531 directory nor the dirstate is updated, so file status may be incorrect
3532 after running this command. Only used if you are one of the few people that
3532 after running this command. Only used if you are one of the few people that
3533 deeply unstand both conversion tools and file level histories. If you are
3533 deeply unstand both conversion tools and file level histories. If you are
3534 reading this help, you are not one of this people (most of them sailed west
3534 reading this help, you are not one of this people (most of them sailed west
3535 from Mithlond anyway.
3535 from Mithlond anyway.
3536
3536
3537 So one last time DO NOT USE THIS COMMAND.
3537 So one last time DO NOT USE THIS COMMAND.
3538
3538
3539 Returns 0 on success.
3539 Returns 0 on success.
3540 """
3540 """
3541
3541
3542 node1 = scmutil.revsingle(repo, rev1).node()
3542 node1 = scmutil.revsingle(repo, rev1).node()
3543 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3543 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3544
3544
3545 with repo.wlock():
3545 with repo.wlock():
3546 repo.setparents(node1, node2)
3546 repo.setparents(node1, node2)
3547
3547
3548
3548
3549 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3549 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3550 def debugsidedata(ui, repo, file_, rev=None, **opts):
3550 def debugsidedata(ui, repo, file_, rev=None, **opts):
3551 """dump the side data for a cl/manifest/file revision
3551 """dump the side data for a cl/manifest/file revision
3552
3552
3553 Use --verbose to dump the sidedata content."""
3553 Use --verbose to dump the sidedata content."""
3554 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
3554 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
3555 if rev is not None:
3555 if rev is not None:
3556 raise error.InputError(
3556 raise error.InputError(
3557 _(b'cannot specify a revision with other arguments')
3557 _(b'cannot specify a revision with other arguments')
3558 )
3558 )
3559 file_, rev = None, file_
3559 file_, rev = None, file_
3560 elif rev is None:
3560 elif rev is None:
3561 raise error.InputError(_(b'please specify a revision'))
3561 raise error.InputError(_(b'please specify a revision'))
3562 r = cmdutil.openstorage(
3562 r = cmdutil.openstorage(
3563 repo, b'debugdata', file_, pycompat.byteskwargs(opts)
3563 repo, b'debugdata', file_, pycompat.byteskwargs(opts)
3564 )
3564 )
3565 r = getattr(r, '_revlog', r)
3565 r = getattr(r, '_revlog', r)
3566 try:
3566 try:
3567 sidedata = r.sidedata(r.lookup(rev))
3567 sidedata = r.sidedata(r.lookup(rev))
3568 except KeyError:
3568 except KeyError:
3569 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3569 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3570 if sidedata:
3570 if sidedata:
3571 sidedata = list(sidedata.items())
3571 sidedata = list(sidedata.items())
3572 sidedata.sort()
3572 sidedata.sort()
3573 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3573 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3574 for key, value in sidedata:
3574 for key, value in sidedata:
3575 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3575 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3576 if ui.verbose:
3576 if ui.verbose:
3577 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3577 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3578
3578
3579
3579
3580 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3580 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3581 def debugssl(ui, repo, source=None, **opts):
3581 def debugssl(ui, repo, source=None, **opts):
3582 """test a secure connection to a server
3582 """test a secure connection to a server
3583
3583
3584 This builds the certificate chain for the server on Windows, installing the
3584 This builds the certificate chain for the server on Windows, installing the
3585 missing intermediates and trusted root via Windows Update if necessary. It
3585 missing intermediates and trusted root via Windows Update if necessary. It
3586 does nothing on other platforms.
3586 does nothing on other platforms.
3587
3587
3588 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3588 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3589 that server is used. See :hg:`help urls` for more information.
3589 that server is used. See :hg:`help urls` for more information.
3590
3590
3591 If the update succeeds, retry the original operation. Otherwise, the cause
3591 If the update succeeds, retry the original operation. Otherwise, the cause
3592 of the SSL error is likely another issue.
3592 of the SSL error is likely another issue.
3593 """
3593 """
3594 if not pycompat.iswindows:
3594 if not pycompat.iswindows:
3595 raise error.Abort(
3595 raise error.Abort(
3596 _(b'certificate chain building is only possible on Windows')
3596 _(b'certificate chain building is only possible on Windows')
3597 )
3597 )
3598
3598
3599 if not source:
3599 if not source:
3600 if not repo:
3600 if not repo:
3601 raise error.Abort(
3601 raise error.Abort(
3602 _(
3602 _(
3603 b"there is no Mercurial repository here, and no "
3603 b"there is no Mercurial repository here, and no "
3604 b"server specified"
3604 b"server specified"
3605 )
3605 )
3606 )
3606 )
3607 source = b"default"
3607 source = b"default"
3608
3608
3609 path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
3609 path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
3610 url = path.url
3610 url = path.url
3611
3611
3612 defaultport = {b'https': 443, b'ssh': 22}
3612 defaultport = {b'https': 443, b'ssh': 22}
3613 if url.scheme in defaultport:
3613 if url.scheme in defaultport:
3614 try:
3614 try:
3615 addr = (url.host, int(url.port or defaultport[url.scheme]))
3615 addr = (url.host, int(url.port or defaultport[url.scheme]))
3616 except ValueError:
3616 except ValueError:
3617 raise error.Abort(_(b"malformed port number in URL"))
3617 raise error.Abort(_(b"malformed port number in URL"))
3618 else:
3618 else:
3619 raise error.Abort(_(b"only https and ssh connections are supported"))
3619 raise error.Abort(_(b"only https and ssh connections are supported"))
3620
3620
3621 from . import win32
3621 from . import win32
3622
3622
3623 s = ssl.wrap_socket(
3623 s = ssl.wrap_socket(
3624 socket.socket(),
3624 socket.socket(),
3625 ssl_version=ssl.PROTOCOL_TLS,
3625 ssl_version=ssl.PROTOCOL_TLS,
3626 cert_reqs=ssl.CERT_NONE,
3626 cert_reqs=ssl.CERT_NONE,
3627 ca_certs=None,
3627 ca_certs=None,
3628 )
3628 )
3629
3629
3630 try:
3630 try:
3631 s.connect(addr)
3631 s.connect(addr)
3632 cert = s.getpeercert(True)
3632 cert = s.getpeercert(True)
3633
3633
3634 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3634 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3635
3635
3636 complete = win32.checkcertificatechain(cert, build=False)
3636 complete = win32.checkcertificatechain(cert, build=False)
3637
3637
3638 if not complete:
3638 if not complete:
3639 ui.status(_(b'certificate chain is incomplete, updating... '))
3639 ui.status(_(b'certificate chain is incomplete, updating... '))
3640
3640
3641 if not win32.checkcertificatechain(cert):
3641 if not win32.checkcertificatechain(cert):
3642 ui.status(_(b'failed.\n'))
3642 ui.status(_(b'failed.\n'))
3643 else:
3643 else:
3644 ui.status(_(b'done.\n'))
3644 ui.status(_(b'done.\n'))
3645 else:
3645 else:
3646 ui.status(_(b'full certificate chain is available\n'))
3646 ui.status(_(b'full certificate chain is available\n'))
3647 finally:
3647 finally:
3648 s.close()
3648 s.close()
3649
3649
3650
3650
3651 @command(
3651 @command(
3652 b'debug::stable-tail-sort',
3652 b'debug::stable-tail-sort',
3653 [
3653 [
3654 (
3654 (
3655 b'T',
3655 b'T',
3656 b'template',
3656 b'template',
3657 b'{rev}\n',
3657 b'{rev}\n',
3658 _(b'display with template'),
3658 _(b'display with template'),
3659 _(b'TEMPLATE'),
3659 _(b'TEMPLATE'),
3660 ),
3660 ),
3661 ],
3661 ],
3662 b'REV',
3662 b'REV',
3663 )
3663 )
3664 def debug_stable_tail_sort(ui, repo, revspec, template, **opts):
3664 def debug_stable_tail_sort(ui, repo, revspec, template, **opts):
3665 """display the stable-tail sort of the ancestors of a given node"""
3665 """display the stable-tail sort of the ancestors of a given node"""
3666 rev = logcmdutil.revsingle(repo, revspec).rev()
3666 rev = logcmdutil.revsingle(repo, revspec).rev()
3667 cl = repo.changelog
3667 cl = repo.changelog
3668
3668
3669 displayer = logcmdutil.maketemplater(ui, repo, template)
3669 displayer = logcmdutil.maketemplater(ui, repo, template)
3670 sorted_revs = stabletailsort._stable_tail_sort_naive(cl, rev)
3670 sorted_revs = stabletailsort._stable_tail_sort_naive(cl, rev)
3671 for ancestor_rev in sorted_revs:
3671 for ancestor_rev in sorted_revs:
3672 displayer.show(repo[ancestor_rev])
3672 displayer.show(repo[ancestor_rev])
3673
3673
3674
3674
3675 @command(
3675 @command(
3676 b'debug::stable-tail-sort-leaps',
3676 b'debug::stable-tail-sort-leaps',
3677 [
3677 [
3678 (
3678 (
3679 b'T',
3679 b'T',
3680 b'template',
3680 b'template',
3681 b'{rev}',
3681 b'{rev}',
3682 _(b'display with template'),
3682 _(b'display with template'),
3683 _(b'TEMPLATE'),
3683 _(b'TEMPLATE'),
3684 ),
3684 ),
3685 (b's', b'specific', False, _(b'restrict to specific leaps')),
3685 (b's', b'specific', False, _(b'restrict to specific leaps')),
3686 ],
3686 ],
3687 b'REV',
3687 b'REV',
3688 )
3688 )
3689 def debug_stable_tail_sort_leaps(ui, repo, rspec, template, specific, **opts):
3689 def debug_stable_tail_sort_leaps(ui, repo, rspec, template, specific, **opts):
3690 """display the leaps in the stable-tail sort of a node, one per line"""
3690 """display the leaps in the stable-tail sort of a node, one per line"""
3691 rev = logcmdutil.revsingle(repo, rspec).rev()
3691 rev = logcmdutil.revsingle(repo, rspec).rev()
3692
3692
3693 if specific:
3693 if specific:
3694 get_leaps = stabletailsort._find_specific_leaps_naive
3694 get_leaps = stabletailsort._find_specific_leaps_naive
3695 else:
3695 else:
3696 get_leaps = stabletailsort._find_all_leaps_naive
3696 get_leaps = stabletailsort._find_all_leaps_naive
3697
3697
3698 displayer = logcmdutil.maketemplater(ui, repo, template)
3698 displayer = logcmdutil.maketemplater(ui, repo, template)
3699 for source, target in get_leaps(repo.changelog, rev):
3699 for source, target in get_leaps(repo.changelog, rev):
3700 displayer.show(repo[source])
3700 displayer.show(repo[source])
3701 displayer.show(repo[target])
3701 displayer.show(repo[target])
3702 ui.write(b'\n')
3702 ui.write(b'\n')
3703
3703
3704
3704
3705 @command(
3705 @command(
3706 b"debugbackupbundle",
3706 b"debugbackupbundle",
3707 [
3707 [
3708 (
3708 (
3709 b"",
3709 b"",
3710 b"recover",
3710 b"recover",
3711 b"",
3711 b"",
3712 b"brings the specified changeset back into the repository",
3712 b"brings the specified changeset back into the repository",
3713 )
3713 )
3714 ]
3714 ]
3715 + cmdutil.logopts,
3715 + cmdutil.logopts,
3716 _(b"hg debugbackupbundle [--recover HASH]"),
3716 _(b"hg debugbackupbundle [--recover HASH]"),
3717 )
3717 )
3718 def debugbackupbundle(ui, repo, *pats, **opts):
3718 def debugbackupbundle(ui, repo, *pats, **opts):
3719 """lists the changesets available in backup bundles
3719 """lists the changesets available in backup bundles
3720
3720
3721 Without any arguments, this command prints a list of the changesets in each
3721 Without any arguments, this command prints a list of the changesets in each
3722 backup bundle.
3722 backup bundle.
3723
3723
3724 --recover takes a changeset hash and unbundles the first bundle that
3724 --recover takes a changeset hash and unbundles the first bundle that
3725 contains that hash, which puts that changeset back in your repository.
3725 contains that hash, which puts that changeset back in your repository.
3726
3726
3727 --verbose will print the entire commit message and the bundle path for that
3727 --verbose will print the entire commit message and the bundle path for that
3728 backup.
3728 backup.
3729 """
3729 """
3730 backups = list(
3730 backups = list(
3731 filter(
3731 filter(
3732 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3732 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3733 )
3733 )
3734 )
3734 )
3735 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3735 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3736
3736
3737 opts["bundle"] = b""
3737 opts["bundle"] = b""
3738 opts["force"] = None
3738 opts["force"] = None
3739 limit = logcmdutil.getlimit(pycompat.byteskwargs(opts))
3739 limit = logcmdutil.getlimit(pycompat.byteskwargs(opts))
3740
3740
3741 def display(other, chlist, displayer):
3741 def display(other, chlist, displayer):
3742 if opts.get("newest_first"):
3742 if opts.get("newest_first"):
3743 chlist.reverse()
3743 chlist.reverse()
3744 count = 0
3744 count = 0
3745 for n in chlist:
3745 for n in chlist:
3746 if limit is not None and count >= limit:
3746 if limit is not None and count >= limit:
3747 break
3747 break
3748 parents = [
3748 parents = [
3749 True for p in other.changelog.parents(n) if p != repo.nullid
3749 True for p in other.changelog.parents(n) if p != repo.nullid
3750 ]
3750 ]
3751 if opts.get("no_merges") and len(parents) == 2:
3751 if opts.get("no_merges") and len(parents) == 2:
3752 continue
3752 continue
3753 count += 1
3753 count += 1
3754 displayer.show(other[n])
3754 displayer.show(other[n])
3755
3755
3756 recovernode = opts.get("recover")
3756 recovernode = opts.get("recover")
3757 if recovernode:
3757 if recovernode:
3758 if scmutil.isrevsymbol(repo, recovernode):
3758 if scmutil.isrevsymbol(repo, recovernode):
3759 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3759 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3760 return
3760 return
3761 elif backups:
3761 elif backups:
3762 msg = _(
3762 msg = _(
3763 b"Recover changesets using: hg debugbackupbundle --recover "
3763 b"Recover changesets using: hg debugbackupbundle --recover "
3764 b"<changeset hash>\n\nAvailable backup changesets:"
3764 b"<changeset hash>\n\nAvailable backup changesets:"
3765 )
3765 )
3766 ui.status(msg, label=b"status.removed")
3766 ui.status(msg, label=b"status.removed")
3767 else:
3767 else:
3768 ui.status(_(b"no backup changesets found\n"))
3768 ui.status(_(b"no backup changesets found\n"))
3769 return
3769 return
3770
3770
3771 for backup in backups:
3771 for backup in backups:
3772 # Much of this is copied from the hg incoming logic
3772 # Much of this is copied from the hg incoming logic
3773 source = os.path.relpath(backup, encoding.getcwd())
3773 source = os.path.relpath(backup, encoding.getcwd())
3774 path = urlutil.get_unique_pull_path_obj(
3774 path = urlutil.get_unique_pull_path_obj(
3775 b'debugbackupbundle',
3775 b'debugbackupbundle',
3776 ui,
3776 ui,
3777 source,
3777 source,
3778 )
3778 )
3779 try:
3779 try:
3780 other = hg.peer(repo, pycompat.byteskwargs(opts), path)
3780 other = hg.peer(repo, pycompat.byteskwargs(opts), path)
3781 except error.LookupError as ex:
3781 except error.LookupError as ex:
3782 msg = _(b"\nwarning: unable to open bundle %s") % path.loc
3782 msg = _(b"\nwarning: unable to open bundle %s") % path.loc
3783 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3783 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3784 ui.warn(msg, hint=hint)
3784 ui.warn(msg, hint=hint)
3785 continue
3785 continue
3786 branches = (path.branch, opts.get('branch', []))
3786 branches = (path.branch, opts.get('branch', []))
3787 revs, checkout = hg.addbranchrevs(
3787 revs, checkout = hg.addbranchrevs(
3788 repo, other, branches, opts.get("rev")
3788 repo, other, branches, opts.get("rev")
3789 )
3789 )
3790
3790
3791 if revs:
3791 if revs:
3792 revs = [other.lookup(rev) for rev in revs]
3792 revs = [other.lookup(rev) for rev in revs]
3793
3793
3794 with ui.silent():
3794 with ui.silent():
3795 try:
3795 try:
3796 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3796 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3797 ui, repo, other, revs, opts["bundle"], opts["force"]
3797 ui, repo, other, revs, opts["bundle"], opts["force"]
3798 )
3798 )
3799 except error.LookupError:
3799 except error.LookupError:
3800 continue
3800 continue
3801
3801
3802 try:
3802 try:
3803 if not chlist:
3803 if not chlist:
3804 continue
3804 continue
3805 if recovernode:
3805 if recovernode:
3806 with repo.lock(), repo.transaction(b"unbundle") as tr:
3806 with repo.lock(), repo.transaction(b"unbundle") as tr:
3807 if scmutil.isrevsymbol(other, recovernode):
3807 if scmutil.isrevsymbol(other, recovernode):
3808 ui.status(_(b"Unbundling %s\n") % (recovernode))
3808 ui.status(_(b"Unbundling %s\n") % (recovernode))
3809 f = hg.openpath(ui, path.loc)
3809 f = hg.openpath(ui, path.loc)
3810 gen = exchange.readbundle(ui, f, path.loc)
3810 gen = exchange.readbundle(ui, f, path.loc)
3811 if isinstance(gen, bundle2.unbundle20):
3811 if isinstance(gen, bundle2.unbundle20):
3812 bundle2.applybundle(
3812 bundle2.applybundle(
3813 repo,
3813 repo,
3814 gen,
3814 gen,
3815 tr,
3815 tr,
3816 source=b"unbundle",
3816 source=b"unbundle",
3817 url=b"bundle:" + path.loc,
3817 url=b"bundle:" + path.loc,
3818 )
3818 )
3819 else:
3819 else:
3820 gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
3820 gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
3821 break
3821 break
3822 else:
3822 else:
3823 backupdate = encoding.strtolocal(
3823 backupdate = encoding.strtolocal(
3824 time.strftime(
3824 time.strftime(
3825 "%a %H:%M, %Y-%m-%d",
3825 "%a %H:%M, %Y-%m-%d",
3826 time.localtime(os.path.getmtime(path.loc)),
3826 time.localtime(os.path.getmtime(path.loc)),
3827 )
3827 )
3828 )
3828 )
3829 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3829 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3830 if ui.verbose:
3830 if ui.verbose:
3831 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
3831 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
3832 else:
3832 else:
3833 opts[
3833 opts[
3834 "template"
3834 "template"
3835 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3835 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3836 displayer = logcmdutil.changesetdisplayer(
3836 displayer = logcmdutil.changesetdisplayer(
3837 ui, other, pycompat.byteskwargs(opts), False
3837 ui, other, pycompat.byteskwargs(opts), False
3838 )
3838 )
3839 display(other, chlist, displayer)
3839 display(other, chlist, displayer)
3840 displayer.close()
3840 displayer.close()
3841 finally:
3841 finally:
3842 cleanupfn()
3842 cleanupfn()
3843
3843
3844
3844
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    # Dump the subrepository state recorded for the selected revision.
    # ``substate`` maps subrepo path -> (source, revision, kind); only the
    # first two fields are printed, in stable (sorted) path order.
    ctx = scmutil.revsingle(repo, rev, None)
    for subpath, substate in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % subpath)
        ui.writenoi18n(b' source %s\n' % substate[0])
        ui.writenoi18n(b' revision %s\n' % substate[1])
3856
3856
3857
3857
@command(
    b'debugshell',
    [
        (
            b'c',
            b'command',
            b'',
            _(b'program passed in as a string'),
            _(b'COMMAND'),
        )
    ],
    _(b'[-c COMMAND]'),
    optionalrepo=True,
)
def debugshell(ui, repo, **opts):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    # Namespace exposed to the interpreter / one-shot command.
    local_ns = {
        'ui': ui,
        'repo': repo,
    }

    # py2exe disables initialization of the site module, which is responsible
    # for arranging for ``quit()`` to exit the interpreter. Manually initialize
    # the stuff that site normally does here, so that the interpreter can be
    # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c,
    # py.exe, or py2exe.
    if getattr(sys, "frozen", None) == 'console_exe':
        try:
            import site

            site.setcopyright()
            site.sethelper()
            site.setquit()
        except ImportError:
            site = None  # Keep PyCharm happy

    command = opts.get('command')
    if command:
        # Non-interactive mode: compile and run the supplied program, then
        # return without entering the REPL.
        compiled = code.compile_command(encoding.strfromlocal(command))
        code.InteractiveInterpreter(locals=local_ns).runcode(compiled)
        return

    code.interact(local=local_ns)
3907
3907
3908
3908
@command(
    b'debug-revlog-stats',
    [
        (b'c', b'changelog', None, _(b'Display changelog statistics')),
        (b'm', b'manifest', None, _(b'Display manifest statistics')),
        (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
    ]
    + cmdutil.formatteropts,
)
def debug_revlog_stats(ui, repo, **opts):
    """display statistics about revlogs in the store"""
    changelog = opts["changelog"]
    manifest = opts["manifest"]
    filelogs = opts["filelogs"]

    # With no explicit selection, report on every revlog kind.
    if changelog is None and manifest is None and filelogs is None:
        changelog = manifest = filelogs = True

    # Work on the unfiltered repo so hidden revisions are included too.
    repo = repo.unfiltered()
    fm = ui.formatter(b'debug-revlog-stats', pycompat.byteskwargs(opts))
    revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
    fm.end()
3933
3933
3934
3934
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # Cache shared across successorssets() calls to avoid recomputation.
    cache = {}
    ctx2str = bytes
    node2str = short
    closest = opts['closest']
    for rev in logcmdutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=closest, cache=cache
        ):
            if succsset:
                # One indented line per successors set, nodes space-separated.
                ui.write(b' ')
                ui.write(b' '.join(node2str(node) for node in succsset))
            ui.write(b'\n')
3989
3989
3990
3990
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        # computemissing=False: only report what the cache already holds.
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            shown = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                # Cached fnode does not exist in the .hgtags filelog.
                shown += b' (unknown node)'
        elif tagsnode is None:
            shown = b'missing'
        else:
            # Falsy but not None (e.g. nullid-like sentinel).
            shown = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), shown))
4009
4009
4010
4010
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # -r requires a repository to resolve the revisions against.
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = logcmdutil.revrange(repo, opts['rev'])

    # Collect -D KEY=VALUE keyword definitions into the template properties.
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    def _showsymbols(t):
        # Dump the keywords and functions referenced by the template.
        kwds, funcs = t.symbolsuseddefault()
        ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
        ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # Generic template: render once with the supplied properties.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            _showsymbols(t)
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            _showsymbols(displayer.t)
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
4074
4074
4075
4075
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    if response is None:
        # No answer (e.g. EOF): emit a marker so the output stays parseable.
        response = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % response)
4090
4090
4091
4091
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    # Echo whatever the prompt machinery returned, verbatim.
    response = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % response)
4104
4104
4105
4105
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-copy lock and the store lock so every cache can
    # be rebuilt without racing concurrent writers.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(caches=repository.CACHES_ALL)
4111
4111
4112
4112
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    # Deduplicate the requested optimizations before delegating to the
    # upgrade machinery, which does all the real work.
    optimizations = set(optimize)
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimizations, backup=backup, **opts
    )
4162
4162
4163
4163
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    # Optional path-separator normalization for display (ui.slash on
    # platforms whose native separator is not '/').
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    # Column widths sized to the longest repo-relative / cwd-relative path.
    # Use generator expressions (no throwaway lists) and avoid naming the
    # variable ``abs``, which shadowed the builtin in the previous version.
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(path) for path in items),
        max(len(repo.pathto(path)) for path in items),
    )
    for path in items:
        line = fmt % (
            path,
            f(repo.pathto(path)),
            m.exact(path) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())
4189
4189
4190
4190
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        divergent = entry.get(b'divergentnodes')
        if divergent:
            # Render each divergent changeset as "<hex> (<phase>)",
            # space-separated, with a trailing space before the reason.
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in divergent
                )
                + b' '
            )
        else:
            dnodes = b''
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
4208
4208
4209
4209
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
    try:
        # Strip the generic remote options; only the test options remain.
        for opt in cmdutil.remoteopts:
            del opts[pycompat.sysstr(opt[1])]
        # Forward only options that were actually set.
        args = {k: v for k, v in opts.items() if v}

        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()
4239
4239
4240
4240
4241 def _parsewirelangblocks(fh):
4241 def _parsewirelangblocks(fh):
4242 activeaction = None
4242 activeaction = None
4243 blocklines = []
4243 blocklines = []
4244 lastindent = 0
4244 lastindent = 0
4245
4245
4246 for line in fh:
4246 for line in fh:
4247 line = line.rstrip()
4247 line = line.rstrip()
4248 if not line:
4248 if not line:
4249 continue
4249 continue
4250
4250
4251 if line.startswith(b'#'):
4251 if line.startswith(b'#'):
4252 continue
4252 continue
4253
4253
4254 if not line.startswith(b' '):
4254 if not line.startswith(b' '):
4255 # New block. Flush previous one.
4255 # New block. Flush previous one.
4256 if activeaction:
4256 if activeaction:
4257 yield activeaction, blocklines
4257 yield activeaction, blocklines
4258
4258
4259 activeaction = line
4259 activeaction = line
4260 blocklines = []
4260 blocklines = []
4261 lastindent = 0
4261 lastindent = 0
4262 continue
4262 continue
4263
4263
4264 # Else we start with an indent.
4264 # Else we start with an indent.
4265
4265
4266 if not activeaction:
4266 if not activeaction:
4267 raise error.Abort(_(b'indented line outside of block'))
4267 raise error.Abort(_(b'indented line outside of block'))
4268
4268
4269 indent = len(line) - len(line.lstrip())
4269 indent = len(line) - len(line.lstrip())
4270
4270
4271 # If this line is indented more than the last line, concatenate it.
4271 # If this line is indented more than the last line, concatenate it.
4272 if indent > lastindent and blocklines:
4272 if indent > lastindent and blocklines:
4273 blocklines[-1] += line.lstrip()
4273 blocklines[-1] += line.lstrip()
4274 else:
4274 else:
4275 blocklines.append(line)
4275 blocklines.append(line)
4276 lastindent = indent
4276 lastindent = indent
4277
4277
4278 # Flush last block.
4278 # Flush last block.
4279 if activeaction:
4279 if activeaction:
4280 yield activeaction, blocklines
4280 yield activeaction, blocklines
4281
4281
4282
4282
4283 @command(
4283 @command(
4284 b'debugwireproto',
4284 b'debugwireproto',
4285 [
4285 [
4286 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4286 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4287 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4287 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4288 (
4288 (
4289 b'',
4289 b'',
4290 b'noreadstderr',
4290 b'noreadstderr',
4291 False,
4291 False,
4292 _(b'do not read from stderr of the remote'),
4292 _(b'do not read from stderr of the remote'),
4293 ),
4293 ),
4294 (
4294 (
4295 b'',
4295 b'',
4296 b'nologhandshake',
4296 b'nologhandshake',
4297 False,
4297 False,
4298 _(b'do not log I/O related to the peer handshake'),
4298 _(b'do not log I/O related to the peer handshake'),
4299 ),
4299 ),
4300 ]
4300 ]
4301 + cmdutil.remoteopts,
4301 + cmdutil.remoteopts,
4302 _(b'[PATH]'),
4302 _(b'[PATH]'),
4303 optionalrepo=True,
4303 optionalrepo=True,
4304 )
4304 )
4305 def debugwireproto(ui, repo, path=None, **opts):
4305 def debugwireproto(ui, repo, path=None, **opts):
4306 """send wire protocol commands to a server
4306 """send wire protocol commands to a server
4307
4307
4308 This command can be used to issue wire protocol commands to remote
4308 This command can be used to issue wire protocol commands to remote
4309 peers and to debug the raw data being exchanged.
4309 peers and to debug the raw data being exchanged.
4310
4310
4311 ``--localssh`` will start an SSH server against the current repository
4311 ``--localssh`` will start an SSH server against the current repository
4312 and connect to that. By default, the connection will perform a handshake
4312 and connect to that. By default, the connection will perform a handshake
4313 and establish an appropriate peer instance.
4313 and establish an appropriate peer instance.
4314
4314
4315 ``--peer`` can be used to bypass the handshake protocol and construct a
4315 ``--peer`` can be used to bypass the handshake protocol and construct a
4316 peer instance using the specified class type. Valid values are ``raw``,
4316 peer instance using the specified class type. Valid values are ``raw``,
4317 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4317 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4318 don't support higher-level command actions.
4318 don't support higher-level command actions.
4319
4319
4320 ``--noreadstderr`` can be used to disable automatic reading from stderr
4320 ``--noreadstderr`` can be used to disable automatic reading from stderr
4321 of the peer (for SSH connections only). Disabling automatic reading of
4321 of the peer (for SSH connections only). Disabling automatic reading of
4322 stderr is useful for making output more deterministic.
4322 stderr is useful for making output more deterministic.
4323
4323
4324 Commands are issued via a mini language which is specified via stdin.
4324 Commands are issued via a mini language which is specified via stdin.
4325 The language consists of individual actions to perform. An action is
4325 The language consists of individual actions to perform. An action is
4326 defined by a block. A block is defined as a line with no leading
4326 defined by a block. A block is defined as a line with no leading
4327 space followed by 0 or more lines with leading space. Blocks are
4327 space followed by 0 or more lines with leading space. Blocks are
4328 effectively a high-level command with additional metadata.
4328 effectively a high-level command with additional metadata.
4329
4329
4330 Lines beginning with ``#`` are ignored.
4330 Lines beginning with ``#`` are ignored.
4331
4331
4332 The following sections denote available actions.
4332 The following sections denote available actions.
4333
4333
4334 raw
4334 raw
4335 ---
4335 ---
4336
4336
4337 Send raw data to the server.
4337 Send raw data to the server.
4338
4338
4339 The block payload contains the raw data to send as one atomic send
4339 The block payload contains the raw data to send as one atomic send
4340 operation. The data may not actually be delivered in a single system
4340 operation. The data may not actually be delivered in a single system
4341 call: it depends on the abilities of the transport being used.
4341 call: it depends on the abilities of the transport being used.
4342
4342
4343 Each line in the block is de-indented and concatenated. Then, that
4343 Each line in the block is de-indented and concatenated. Then, that
4344 value is evaluated as a Python b'' literal. This allows the use of
4344 value is evaluated as a Python b'' literal. This allows the use of
4345 backslash escaping, etc.
4345 backslash escaping, etc.
4346
4346
4347 raw+
4347 raw+
4348 ----
4348 ----
4349
4349
4350 Behaves like ``raw`` except flushes output afterwards.
4350 Behaves like ``raw`` except flushes output afterwards.
4351
4351
4352 command <X>
4352 command <X>
4353 -----------
4353 -----------
4354
4354
4355 Send a request to run a named command, whose name follows the ``command``
4355 Send a request to run a named command, whose name follows the ``command``
4356 string.
4356 string.
4357
4357
4358 Arguments to the command are defined as lines in this block. The format of
4358 Arguments to the command are defined as lines in this block. The format of
4359 each line is ``<key> <value>``. e.g.::
4359 each line is ``<key> <value>``. e.g.::
4360
4360
4361 command listkeys
4361 command listkeys
4362 namespace bookmarks
4362 namespace bookmarks
4363
4363
4364 If the value begins with ``eval:``, it will be interpreted as a Python
4364 If the value begins with ``eval:``, it will be interpreted as a Python
4365 literal expression. Otherwise values are interpreted as Python b'' literals.
4365 literal expression. Otherwise values are interpreted as Python b'' literals.
4366 This allows sending complex types and encoding special byte sequences via
4366 This allows sending complex types and encoding special byte sequences via
4367 backslash escaping.
4367 backslash escaping.
4368
4368
4369 The following arguments have special meaning:
4369 The following arguments have special meaning:
4370
4370
4371 ``PUSHFILE``
4371 ``PUSHFILE``
4372 When defined, the *push* mechanism of the peer will be used instead
4372 When defined, the *push* mechanism of the peer will be used instead
4373 of the static request-response mechanism and the content of the
4373 of the static request-response mechanism and the content of the
4374 file specified in the value of this argument will be sent as the
4374 file specified in the value of this argument will be sent as the
4375 command payload.
4375 command payload.
4376
4376
4377 This can be used to submit a local bundle file to the remote.
4377 This can be used to submit a local bundle file to the remote.
4378
4378
4379 batchbegin
4379 batchbegin
4380 ----------
4380 ----------
4381
4381
4382 Instruct the peer to begin a batched send.
4382 Instruct the peer to begin a batched send.
4383
4383
4384 All ``command`` blocks are queued for execution until the next
4384 All ``command`` blocks are queued for execution until the next
4385 ``batchsubmit`` block.
4385 ``batchsubmit`` block.
4386
4386
4387 batchsubmit
4387 batchsubmit
4388 -----------
4388 -----------
4389
4389
4390 Submit previously queued ``command`` blocks as a batch request.
4390 Submit previously queued ``command`` blocks as a batch request.
4391
4391
4392 This action MUST be paired with a ``batchbegin`` action.
4392 This action MUST be paired with a ``batchbegin`` action.
4393
4393
4394 httprequest <method> <path>
4394 httprequest <method> <path>
4395 ---------------------------
4395 ---------------------------
4396
4396
4397 (HTTP peer only)
4397 (HTTP peer only)
4398
4398
4399 Send an HTTP request to the peer.
4399 Send an HTTP request to the peer.
4400
4400
4401 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4401 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4402
4402
4403 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4403 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4404 headers to add to the request. e.g. ``Accept: foo``.
4404 headers to add to the request. e.g. ``Accept: foo``.
4405
4405
4406 The following arguments are special:
4406 The following arguments are special:
4407
4407
4408 ``BODYFILE``
4408 ``BODYFILE``
4409 The content of the file defined as the value to this argument will be
4409 The content of the file defined as the value to this argument will be
4410 transferred verbatim as the HTTP request body.
4410 transferred verbatim as the HTTP request body.
4411
4411
4412 ``frame <type> <flags> <payload>``
4412 ``frame <type> <flags> <payload>``
4413 Send a unified protocol frame as part of the request body.
4413 Send a unified protocol frame as part of the request body.
4414
4414
4415 All frames will be collected and sent as the body to the HTTP
4415 All frames will be collected and sent as the body to the HTTP
4416 request.
4416 request.
4417
4417
4418 close
4418 close
4419 -----
4419 -----
4420
4420
4421 Close the connection to the server.
4421 Close the connection to the server.
4422
4422
4423 flush
4423 flush
4424 -----
4424 -----
4425
4425
4426 Flush data written to the server.
4426 Flush data written to the server.
4427
4427
4428 readavailable
4428 readavailable
4429 -------------
4429 -------------
4430
4430
4431 Close the write end of the connection and read all available data from
4431 Close the write end of the connection and read all available data from
4432 the server.
4432 the server.
4433
4433
4434 If the connection to the server encompasses multiple pipes, we poll both
4434 If the connection to the server encompasses multiple pipes, we poll both
4435 pipes and read available data.
4435 pipes and read available data.
4436
4436
4437 readline
4437 readline
4438 --------
4438 --------
4439
4439
4440 Read a line of output from the server. If there are multiple output
4440 Read a line of output from the server. If there are multiple output
4441 pipes, reads only the main pipe.
4441 pipes, reads only the main pipe.
4442
4442
4443 ereadline
4443 ereadline
4444 ---------
4444 ---------
4445
4445
4446 Like ``readline``, but read from the stderr pipe, if available.
4446 Like ``readline``, but read from the stderr pipe, if available.
4447
4447
4448 read <X>
4448 read <X>
4449 --------
4449 --------
4450
4450
4451 ``read()`` N bytes from the server's main output pipe.
4451 ``read()`` N bytes from the server's main output pipe.
4452
4452
4453 eread <X>
4453 eread <X>
4454 ---------
4454 ---------
4455
4455
4456 ``read()`` N bytes from the server's stderr pipe, if available.
4456 ``read()`` N bytes from the server's stderr pipe, if available.
4457
4457
4458 Specifying Unified Frame-Based Protocol Frames
4458 Specifying Unified Frame-Based Protocol Frames
4459 ----------------------------------------------
4459 ----------------------------------------------
4460
4460
4461 It is possible to emit a *Unified Frame-Based Protocol* by using special
4461 It is possible to emit a *Unified Frame-Based Protocol* by using special
4462 syntax.
4462 syntax.
4463
4463
4464 A frame is composed as a type, flags, and payload. These can be parsed
4464 A frame is composed as a type, flags, and payload. These can be parsed
4465 from a string of the form:
4465 from a string of the form:
4466
4466
4467 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4467 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4468
4468
4469 ``request-id`` and ``stream-id`` are integers defining the request and
4469 ``request-id`` and ``stream-id`` are integers defining the request and
4470 stream identifiers.
4470 stream identifiers.
4471
4471
4472 ``type`` can be an integer value for the frame type or the string name
4472 ``type`` can be an integer value for the frame type or the string name
4473 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4473 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4474 ``command-name``.
4474 ``command-name``.
4475
4475
4476 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4476 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4477 components. Each component (and there can be just one) can be an integer
4477 components. Each component (and there can be just one) can be an integer
4478 or a flag name for stream flags or frame flags, respectively. Values are
4478 or a flag name for stream flags or frame flags, respectively. Values are
4479 resolved to integers and then bitwise OR'd together.
4479 resolved to integers and then bitwise OR'd together.
4480
4480
4481 ``payload`` represents the raw frame payload. If it begins with
4481 ``payload`` represents the raw frame payload. If it begins with
4482 ``cbor:``, the following string is evaluated as Python code and the
4482 ``cbor:``, the following string is evaluated as Python code and the
4483 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4483 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4484 as a Python byte string literal.
4484 as a Python byte string literal.
4485 """
4485 """
4486 if opts['localssh'] and not repo:
4486 if opts['localssh'] and not repo:
4487 raise error.Abort(_(b'--localssh requires a repository'))
4487 raise error.Abort(_(b'--localssh requires a repository'))
4488
4488
4489 if opts['peer'] and opts['peer'] not in (
4489 if opts['peer'] and opts['peer'] not in (
4490 b'raw',
4490 b'raw',
4491 b'ssh1',
4491 b'ssh1',
4492 ):
4492 ):
4493 raise error.Abort(
4493 raise error.Abort(
4494 _(b'invalid value for --peer'),
4494 _(b'invalid value for --peer'),
4495 hint=_(b'valid values are "raw" and "ssh1"'),
4495 hint=_(b'valid values are "raw" and "ssh1"'),
4496 )
4496 )
4497
4497
4498 if path and opts['localssh']:
4498 if path and opts['localssh']:
4499 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4499 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4500
4500
4501 if ui.interactive():
4501 if ui.interactive():
4502 ui.write(_(b'(waiting for commands on stdin)\n'))
4502 ui.write(_(b'(waiting for commands on stdin)\n'))
4503
4503
4504 blocks = list(_parsewirelangblocks(ui.fin))
4504 blocks = list(_parsewirelangblocks(ui.fin))
4505
4505
4506 proc = None
4506 proc = None
4507 stdin = None
4507 stdin = None
4508 stdout = None
4508 stdout = None
4509 stderr = None
4509 stderr = None
4510 opener = None
4510 opener = None
4511
4511
4512 if opts['localssh']:
4512 if opts['localssh']:
4513 # We start the SSH server in its own process so there is process
4513 # We start the SSH server in its own process so there is process
4514 # separation. This prevents a whole class of potential bugs around
4514 # separation. This prevents a whole class of potential bugs around
4515 # shared state from interfering with server operation.
4515 # shared state from interfering with server operation.
4516 args = procutil.hgcmd() + [
4516 args = procutil.hgcmd() + [
4517 b'-R',
4517 b'-R',
4518 repo.root,
4518 repo.root,
4519 b'debugserve',
4519 b'debugserve',
4520 b'--sshstdio',
4520 b'--sshstdio',
4521 ]
4521 ]
4522 proc = subprocess.Popen(
4522 proc = subprocess.Popen(
4523 pycompat.rapply(procutil.tonativestr, args),
4523 pycompat.rapply(procutil.tonativestr, args),
4524 stdin=subprocess.PIPE,
4524 stdin=subprocess.PIPE,
4525 stdout=subprocess.PIPE,
4525 stdout=subprocess.PIPE,
4526 stderr=subprocess.PIPE,
4526 stderr=subprocess.PIPE,
4527 bufsize=0,
4527 bufsize=0,
4528 )
4528 )
4529
4529
4530 stdin = proc.stdin
4530 stdin = proc.stdin
4531 stdout = proc.stdout
4531 stdout = proc.stdout
4532 stderr = proc.stderr
4532 stderr = proc.stderr
4533
4533
4534 # We turn the pipes into observers so we can log I/O.
4534 # We turn the pipes into observers so we can log I/O.
4535 if ui.verbose or opts['peer'] == b'raw':
4535 if ui.verbose or opts['peer'] == b'raw':
4536 stdin = util.makeloggingfileobject(
4536 stdin = util.makeloggingfileobject(
4537 ui, proc.stdin, b'i', logdata=True
4537 ui, proc.stdin, b'i', logdata=True
4538 )
4538 )
4539 stdout = util.makeloggingfileobject(
4539 stdout = util.makeloggingfileobject(
4540 ui, proc.stdout, b'o', logdata=True
4540 ui, proc.stdout, b'o', logdata=True
4541 )
4541 )
4542 stderr = util.makeloggingfileobject(
4542 stderr = util.makeloggingfileobject(
4543 ui, proc.stderr, b'e', logdata=True
4543 ui, proc.stderr, b'e', logdata=True
4544 )
4544 )
4545
4545
4546 # --localssh also implies the peer connection settings.
4546 # --localssh also implies the peer connection settings.
4547
4547
4548 url = b'ssh://localserver'
4548 url = b'ssh://localserver'
4549 autoreadstderr = not opts['noreadstderr']
4549 autoreadstderr = not opts['noreadstderr']
4550
4550
4551 if opts['peer'] == b'ssh1':
4551 if opts['peer'] == b'ssh1':
4552 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4552 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4553 peer = sshpeer.sshv1peer(
4553 peer = sshpeer.sshv1peer(
4554 ui,
4554 ui,
4555 url,
4555 url,
4556 proc,
4556 proc,
4557 stdin,
4557 stdin,
4558 stdout,
4558 stdout,
4559 stderr,
4559 stderr,
4560 None,
4560 None,
4561 autoreadstderr=autoreadstderr,
4561 autoreadstderr=autoreadstderr,
4562 )
4562 )
4563 elif opts['peer'] == b'raw':
4563 elif opts['peer'] == b'raw':
4564 ui.write(_(b'using raw connection to peer\n'))
4564 ui.write(_(b'using raw connection to peer\n'))
4565 peer = None
4565 peer = None
4566 else:
4566 else:
4567 ui.write(_(b'creating ssh peer from handshake results\n'))
4567 ui.write(_(b'creating ssh peer from handshake results\n'))
4568 peer = sshpeer._make_peer(
4568 peer = sshpeer._make_peer(
4569 ui,
4569 ui,
4570 url,
4570 url,
4571 proc,
4571 proc,
4572 stdin,
4572 stdin,
4573 stdout,
4573 stdout,
4574 stderr,
4574 stderr,
4575 autoreadstderr=autoreadstderr,
4575 autoreadstderr=autoreadstderr,
4576 )
4576 )
4577
4577
4578 elif path:
4578 elif path:
4579 # We bypass hg.peer() so we can proxy the sockets.
4579 # We bypass hg.peer() so we can proxy the sockets.
4580 # TODO consider not doing this because we skip
4580 # TODO consider not doing this because we skip
4581 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4581 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4582 u = urlutil.url(path)
4582 u = urlutil.url(path)
4583 if u.scheme != b'http':
4583 if u.scheme != b'http':
4584 raise error.Abort(_(b'only http:// paths are currently supported'))
4584 raise error.Abort(_(b'only http:// paths are currently supported'))
4585
4585
4586 url, authinfo = u.authinfo()
4586 url, authinfo = u.authinfo()
4587 openerargs = {
4587 openerargs = {
4588 'useragent': b'Mercurial debugwireproto',
4588 'useragent': b'Mercurial debugwireproto',
4589 }
4589 }
4590
4590
4591 # Turn pipes/sockets into observers so we can log I/O.
4591 # Turn pipes/sockets into observers so we can log I/O.
4592 if ui.verbose:
4592 if ui.verbose:
4593 openerargs.update(
4593 openerargs.update(
4594 {
4594 {
4595 'loggingfh': ui,
4595 'loggingfh': ui,
4596 'loggingname': b's',
4596 'loggingname': b's',
4597 'loggingopts': {
4597 'loggingopts': {
4598 'logdata': True,
4598 'logdata': True,
4599 'logdataapis': False,
4599 'logdataapis': False,
4600 },
4600 },
4601 }
4601 }
4602 )
4602 )
4603
4603
4604 if ui.debugflag:
4604 if ui.debugflag:
4605 openerargs['loggingopts']['logdataapis'] = True
4605 openerargs['loggingopts']['logdataapis'] = True
4606
4606
4607 # Don't send default headers when in raw mode. This allows us to
4607 # Don't send default headers when in raw mode. This allows us to
4608 # bypass most of the behavior of our URL handling code so we can
4608 # bypass most of the behavior of our URL handling code so we can
4609 # have near complete control over what's sent on the wire.
4609 # have near complete control over what's sent on the wire.
4610 if opts['peer'] == b'raw':
4610 if opts['peer'] == b'raw':
4611 openerargs['sendaccept'] = False
4611 openerargs['sendaccept'] = False
4612
4612
4613 opener = urlmod.opener(ui, authinfo, **openerargs)
4613 opener = urlmod.opener(ui, authinfo, **openerargs)
4614
4614
4615 if opts['peer'] == b'raw':
4615 if opts['peer'] == b'raw':
4616 ui.write(_(b'using raw connection to peer\n'))
4616 ui.write(_(b'using raw connection to peer\n'))
4617 peer = None
4617 peer = None
4618 elif opts['peer']:
4618 elif opts['peer']:
4619 raise error.Abort(
4619 raise error.Abort(
4620 _(b'--peer %s not supported with HTTP peers') % opts['peer']
4620 _(b'--peer %s not supported with HTTP peers') % opts['peer']
4621 )
4621 )
4622 else:
4622 else:
4623 peer_path = urlutil.try_path(ui, path)
4623 peer_path = urlutil.try_path(ui, path)
4624 peer = httppeer._make_peer(ui, peer_path, opener=opener)
4624 peer = httppeer._make_peer(ui, peer_path, opener=opener)
4625
4625
4626 # We /could/ populate stdin/stdout with sock.makefile()...
4626 # We /could/ populate stdin/stdout with sock.makefile()...
4627 else:
4627 else:
4628 raise error.Abort(_(b'unsupported connection configuration'))
4628 raise error.Abort(_(b'unsupported connection configuration'))
4629
4629
4630 batchedcommands = None
4630 batchedcommands = None
4631
4631
4632 # Now perform actions based on the parsed wire language instructions.
4632 # Now perform actions based on the parsed wire language instructions.
4633 for action, lines in blocks:
4633 for action, lines in blocks:
4634 if action in (b'raw', b'raw+'):
4634 if action in (b'raw', b'raw+'):
4635 if not stdin:
4635 if not stdin:
4636 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4636 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4637
4637
4638 # Concatenate the data together.
4638 # Concatenate the data together.
4639 data = b''.join(l.lstrip() for l in lines)
4639 data = b''.join(l.lstrip() for l in lines)
4640 data = stringutil.unescapestr(data)
4640 data = stringutil.unescapestr(data)
4641 stdin.write(data)
4641 stdin.write(data)
4642
4642
4643 if action == b'raw+':
4643 if action == b'raw+':
4644 stdin.flush()
4644 stdin.flush()
4645 elif action == b'flush':
4645 elif action == b'flush':
4646 if not stdin:
4646 if not stdin:
4647 raise error.Abort(_(b'cannot call flush on this peer'))
4647 raise error.Abort(_(b'cannot call flush on this peer'))
4648 stdin.flush()
4648 stdin.flush()
4649 elif action.startswith(b'command'):
4649 elif action.startswith(b'command'):
4650 if not peer:
4650 if not peer:
4651 raise error.Abort(
4651 raise error.Abort(
4652 _(
4652 _(
4653 b'cannot send commands unless peer instance '
4653 b'cannot send commands unless peer instance '
4654 b'is available'
4654 b'is available'
4655 )
4655 )
4656 )
4656 )
4657
4657
4658 command = action.split(b' ', 1)[1]
4658 command = action.split(b' ', 1)[1]
4659
4659
4660 args = {}
4660 args = {}
4661 for line in lines:
4661 for line in lines:
4662 # We need to allow empty values.
4662 # We need to allow empty values.
4663 fields = line.lstrip().split(b' ', 1)
4663 fields = line.lstrip().split(b' ', 1)
4664 if len(fields) == 1:
4664 if len(fields) == 1:
4665 key = fields[0]
4665 key = fields[0]
4666 value = b''
4666 value = b''
4667 else:
4667 else:
4668 key, value = fields
4668 key, value = fields
4669
4669
4670 if value.startswith(b'eval:'):
4670 if value.startswith(b'eval:'):
4671 value = stringutil.evalpythonliteral(value[5:])
4671 value = stringutil.evalpythonliteral(value[5:])
4672 else:
4672 else:
4673 value = stringutil.unescapestr(value)
4673 value = stringutil.unescapestr(value)
4674
4674
4675 args[key] = value
4675 args[key] = value
4676
4676
4677 if batchedcommands is not None:
4677 if batchedcommands is not None:
4678 batchedcommands.append((command, args))
4678 batchedcommands.append((command, args))
4679 continue
4679 continue
4680
4680
4681 ui.status(_(b'sending %s command\n') % command)
4681 ui.status(_(b'sending %s command\n') % command)
4682
4682
4683 if b'PUSHFILE' in args:
4683 if b'PUSHFILE' in args:
4684 with open(args[b'PUSHFILE'], 'rb') as fh:
4684 with open(args[b'PUSHFILE'], 'rb') as fh:
4685 del args[b'PUSHFILE']
4685 del args[b'PUSHFILE']
4686 res, output = peer._callpush(
4686 res, output = peer._callpush(
4687 command, fh, **pycompat.strkwargs(args)
4687 command, fh, **pycompat.strkwargs(args)
4688 )
4688 )
4689 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4689 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4690 ui.status(
4690 ui.status(
4691 _(b'remote output: %s\n') % stringutil.escapestr(output)
4691 _(b'remote output: %s\n') % stringutil.escapestr(output)
4692 )
4692 )
4693 else:
4693 else:
4694 with peer.commandexecutor() as e:
4694 with peer.commandexecutor() as e:
4695 res = e.callcommand(command, args).result()
4695 res = e.callcommand(command, args).result()
4696
4696
4697 ui.status(
4697 ui.status(
4698 _(b'response: %s\n')
4698 _(b'response: %s\n')
4699 % stringutil.pprint(res, bprefix=True, indent=2)
4699 % stringutil.pprint(res, bprefix=True, indent=2)
4700 )
4700 )
4701
4701
4702 elif action == b'batchbegin':
4702 elif action == b'batchbegin':
4703 if batchedcommands is not None:
4703 if batchedcommands is not None:
4704 raise error.Abort(_(b'nested batchbegin not allowed'))
4704 raise error.Abort(_(b'nested batchbegin not allowed'))
4705
4705
4706 batchedcommands = []
4706 batchedcommands = []
4707 elif action == b'batchsubmit':
4707 elif action == b'batchsubmit':
4708 # There is a batching API we could go through. But it would be
4708 # There is a batching API we could go through. But it would be
4709 # difficult to normalize requests into function calls. It is easier
4709 # difficult to normalize requests into function calls. It is easier
4710 # to bypass this layer and normalize to commands + args.
4710 # to bypass this layer and normalize to commands + args.
4711 ui.status(
4711 ui.status(
4712 _(b'sending batch with %d sub-commands\n')
4712 _(b'sending batch with %d sub-commands\n')
4713 % len(batchedcommands)
4713 % len(batchedcommands)
4714 )
4714 )
4715 assert peer is not None
4715 assert peer is not None
4716 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4716 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4717 ui.status(
4717 ui.status(
4718 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4718 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4719 )
4719 )
4720
4720
4721 batchedcommands = None
4721 batchedcommands = None
4722
4722
4723 elif action.startswith(b'httprequest '):
4723 elif action.startswith(b'httprequest '):
4724 if not opener:
4724 if not opener:
4725 raise error.Abort(
4725 raise error.Abort(
4726 _(b'cannot use httprequest without an HTTP peer')
4726 _(b'cannot use httprequest without an HTTP peer')
4727 )
4727 )
4728
4728
4729 request = action.split(b' ', 2)
4729 request = action.split(b' ', 2)
4730 if len(request) != 3:
4730 if len(request) != 3:
4731 raise error.Abort(
4731 raise error.Abort(
4732 _(
4732 _(
4733 b'invalid httprequest: expected format is '
4733 b'invalid httprequest: expected format is '
4734 b'"httprequest <method> <path>'
4734 b'"httprequest <method> <path>'
4735 )
4735 )
4736 )
4736 )
4737
4737
4738 method, httppath = request[1:]
4738 method, httppath = request[1:]
4739 headers = {}
4739 headers = {}
4740 body = None
4740 body = None
4741 frames = []
4741 frames = []
4742 for line in lines:
4742 for line in lines:
4743 line = line.lstrip()
4743 line = line.lstrip()
4744 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4744 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4745 if m:
4745 if m:
4746 # Headers need to use native strings.
4746 # Headers need to use native strings.
4747 key = pycompat.strurl(m.group(1))
4747 key = pycompat.strurl(m.group(1))
4748 value = pycompat.strurl(m.group(2))
4748 value = pycompat.strurl(m.group(2))
4749 headers[key] = value
4749 headers[key] = value
4750 continue
4750 continue
4751
4751
4752 if line.startswith(b'BODYFILE '):
4752 if line.startswith(b'BODYFILE '):
4753 with open(line.split(b' ', 1), b'rb') as fh:
4753 with open(line.split(b' ', 1), b'rb') as fh:
4754 body = fh.read()
4754 body = fh.read()
4755 elif line.startswith(b'frame '):
4755 elif line.startswith(b'frame '):
4756 frame = wireprotoframing.makeframefromhumanstring(
4756 frame = wireprotoframing.makeframefromhumanstring(
4757 line[len(b'frame ') :]
4757 line[len(b'frame ') :]
4758 )
4758 )
4759
4759
4760 frames.append(frame)
4760 frames.append(frame)
4761 else:
4761 else:
4762 raise error.Abort(
4762 raise error.Abort(
4763 _(b'unknown argument to httprequest: %s') % line
4763 _(b'unknown argument to httprequest: %s') % line
4764 )
4764 )
4765
4765
4766 url = path + httppath
4766 url = path + httppath
4767
4767
4768 if frames:
4768 if frames:
4769 body = b''.join(bytes(f) for f in frames)
4769 body = b''.join(bytes(f) for f in frames)
4770
4770
4771 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4771 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4772
4772
4773 # urllib.Request insists on using has_data() as a proxy for
4773 # urllib.Request insists on using has_data() as a proxy for
4774 # determining the request method. Override that to use our
4774 # determining the request method. Override that to use our
4775 # explicitly requested method.
4775 # explicitly requested method.
4776 req.get_method = lambda: pycompat.sysstr(method)
4776 req.get_method = lambda: pycompat.sysstr(method)
4777
4777
4778 try:
4778 try:
4779 res = opener.open(req)
4779 res = opener.open(req)
4780 body = res.read()
4780 body = res.read()
4781 except util.urlerr.urlerror as e:
4781 except util.urlerr.urlerror as e:
4782 # read() method must be called, but only exists in Python 2
4782 # read() method must be called, but only exists in Python 2
4783 getattr(e, 'read', lambda: None)()
4783 getattr(e, 'read', lambda: None)()
4784 continue
4784 continue
4785
4785
4786 ct = res.headers.get('Content-Type')
4786 ct = res.headers.get('Content-Type')
4787 if ct == 'application/mercurial-cbor':
4787 if ct == 'application/mercurial-cbor':
4788 ui.write(
4788 ui.write(
4789 _(b'cbor> %s\n')
4789 _(b'cbor> %s\n')
4790 % stringutil.pprint(
4790 % stringutil.pprint(
4791 cborutil.decodeall(body), bprefix=True, indent=2
4791 cborutil.decodeall(body), bprefix=True, indent=2
4792 )
4792 )
4793 )
4793 )
4794
4794
4795 elif action == b'close':
4795 elif action == b'close':
4796 assert peer is not None
4796 assert peer is not None
4797 peer.close()
4797 peer.close()
4798 elif action == b'readavailable':
4798 elif action == b'readavailable':
4799 if not stdout or not stderr:
4799 if not stdout or not stderr:
4800 raise error.Abort(
4800 raise error.Abort(
4801 _(b'readavailable not available on this peer')
4801 _(b'readavailable not available on this peer')
4802 )
4802 )
4803
4803
4804 stdin.close()
4804 stdin.close()
4805 stdout.read()
4805 stdout.read()
4806 stderr.read()
4806 stderr.read()
4807
4807
4808 elif action == b'readline':
4808 elif action == b'readline':
4809 if not stdout:
4809 if not stdout:
4810 raise error.Abort(_(b'readline not available on this peer'))
4810 raise error.Abort(_(b'readline not available on this peer'))
4811 stdout.readline()
4811 stdout.readline()
4812 elif action == b'ereadline':
4812 elif action == b'ereadline':
4813 if not stderr:
4813 if not stderr:
4814 raise error.Abort(_(b'ereadline not available on this peer'))
4814 raise error.Abort(_(b'ereadline not available on this peer'))
4815 stderr.readline()
4815 stderr.readline()
4816 elif action.startswith(b'read '):
4816 elif action.startswith(b'read '):
4817 count = int(action.split(b' ', 1)[1])
4817 count = int(action.split(b' ', 1)[1])
4818 if not stdout:
4818 if not stdout:
4819 raise error.Abort(_(b'read not available on this peer'))
4819 raise error.Abort(_(b'read not available on this peer'))
4820 stdout.read(count)
4820 stdout.read(count)
4821 elif action.startswith(b'eread '):
4821 elif action.startswith(b'eread '):
4822 count = int(action.split(b' ', 1)[1])
4822 count = int(action.split(b' ', 1)[1])
4823 if not stderr:
4823 if not stderr:
4824 raise error.Abort(_(b'eread not available on this peer'))
4824 raise error.Abort(_(b'eread not available on this peer'))
4825 stderr.read(count)
4825 stderr.read(count)
4826 else:
4826 else:
4827 raise error.Abort(_(b'unknown action: %s') % action)
4827 raise error.Abort(_(b'unknown action: %s') % action)
4828
4828
4829 if batchedcommands is not None:
4829 if batchedcommands is not None:
4830 raise error.Abort(_(b'unclosed "batchbegin" request'))
4830 raise error.Abort(_(b'unclosed "batchbegin" request'))
4831
4831
4832 if peer:
4832 if peer:
4833 peer.close()
4833 peer.close()
4834
4834
4835 if proc:
4835 if proc:
4836 proc.kill()
4836 proc.kill()
@@ -1,3724 +1,3724 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 # coding: utf8
2 # coding: utf8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Storage back-end for Mercurial.
9 """Storage back-end for Mercurial.
10
10
11 This provides efficient delta storage with O(1) retrieve and append
11 This provides efficient delta storage with O(1) retrieve and append
12 and O(changes) merge between branches.
12 and O(changes) merge between branches.
13 """
13 """
14
14
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import weakref
22 import weakref
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .revlogutils.constants import (
35 from .revlogutils.constants import (
36 ALL_KINDS,
36 ALL_KINDS,
37 CHANGELOGV2,
37 CHANGELOGV2,
38 COMP_MODE_DEFAULT,
38 COMP_MODE_DEFAULT,
39 COMP_MODE_INLINE,
39 COMP_MODE_INLINE,
40 COMP_MODE_PLAIN,
40 COMP_MODE_PLAIN,
41 DELTA_BASE_REUSE_NO,
41 DELTA_BASE_REUSE_NO,
42 DELTA_BASE_REUSE_TRY,
42 DELTA_BASE_REUSE_TRY,
43 ENTRY_RANK,
43 ENTRY_RANK,
44 FEATURES_BY_VERSION,
44 FEATURES_BY_VERSION,
45 FLAG_GENERALDELTA,
45 FLAG_GENERALDELTA,
46 FLAG_INLINE_DATA,
46 FLAG_INLINE_DATA,
47 INDEX_HEADER,
47 INDEX_HEADER,
48 KIND_CHANGELOG,
48 KIND_CHANGELOG,
49 KIND_FILELOG,
49 KIND_FILELOG,
50 RANK_UNKNOWN,
50 RANK_UNKNOWN,
51 REVLOGV0,
51 REVLOGV0,
52 REVLOGV1,
52 REVLOGV1,
53 REVLOGV1_FLAGS,
53 REVLOGV1_FLAGS,
54 REVLOGV2,
54 REVLOGV2,
55 REVLOGV2_FLAGS,
55 REVLOGV2_FLAGS,
56 REVLOG_DEFAULT_FLAGS,
56 REVLOG_DEFAULT_FLAGS,
57 REVLOG_DEFAULT_FORMAT,
57 REVLOG_DEFAULT_FORMAT,
58 REVLOG_DEFAULT_VERSION,
58 REVLOG_DEFAULT_VERSION,
59 SUPPORTED_FLAGS,
59 SUPPORTED_FLAGS,
60 )
60 )
61 from .revlogutils.flagutil import (
61 from .revlogutils.flagutil import (
62 REVIDX_DEFAULT_FLAGS,
62 REVIDX_DEFAULT_FLAGS,
63 REVIDX_ELLIPSIS,
63 REVIDX_ELLIPSIS,
64 REVIDX_EXTSTORED,
64 REVIDX_EXTSTORED,
65 REVIDX_FLAGS_ORDER,
65 REVIDX_FLAGS_ORDER,
66 REVIDX_HASCOPIESINFO,
66 REVIDX_HASCOPIESINFO,
67 REVIDX_ISCENSORED,
67 REVIDX_ISCENSORED,
68 REVIDX_RAWTEXT_CHANGING_FLAGS,
68 REVIDX_RAWTEXT_CHANGING_FLAGS,
69 )
69 )
70 from .thirdparty import attr
70 from .thirdparty import attr
71 from . import (
71 from . import (
72 ancestor,
72 ancestor,
73 dagop,
73 dagop,
74 error,
74 error,
75 mdiff,
75 mdiff,
76 policy,
76 policy,
77 pycompat,
77 pycompat,
78 revlogutils,
78 revlogutils,
79 templatefilters,
79 templatefilters,
80 util,
80 util,
81 )
81 )
82 from .interfaces import (
82 from .interfaces import (
83 repository,
83 repository,
84 util as interfaceutil,
84 util as interfaceutil,
85 )
85 )
86 from .revlogutils import (
86 from .revlogutils import (
87 deltas as deltautil,
87 deltas as deltautil,
88 docket as docketutil,
88 docket as docketutil,
89 flagutil,
89 flagutil,
90 nodemap as nodemaputil,
90 nodemap as nodemaputil,
91 randomaccessfile,
91 randomaccessfile,
92 revlogv0,
92 revlogv0,
93 rewrite,
93 rewrite,
94 sidedata as sidedatautil,
94 sidedata as sidedatautil,
95 )
95 )
96 from .utils import (
96 from .utils import (
97 storageutil,
97 storageutil,
98 stringutil,
98 stringutil,
99 )
99 )
100
100
# Blanked usage of all the names to prevent pyflakes complaints.
# We need these names available in the module for extensions.

REVLOGV0
REVLOGV1
REVLOGV2
CHANGELOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

# Optional accelerated implementations: C module and Rust modules; each of
# these may be None when the corresponding extension is unavailable.
parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of inline data embedded into a revlog
_maxinline = 131072
133
133
134 # Flag processors for REVIDX_ELLIPSIS.
134 # Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Read-side flag processor for REVIDX_ELLIPSIS: pass ``text`` through
    unchanged and report that no hash validation is required."""
    return (text, False)
137
137
138
138
def ellipsiswriteprocessor(rl, text):
    """Write-side flag processor for REVIDX_ELLIPSIS: identity transform,
    mirroring :func:`ellipsisreadprocessor`."""
    return (text, False)
141
141
142
142
def ellipsisrawprocessor(rl, text):
    """Raw-side flag processor for REVIDX_ELLIPSIS: the raw text is never
    considered hash-verifiable for ellipsis revisions."""
    return False
145
145
146
146
# (read, write, raw) processor triple registered for REVIDX_ELLIPSIS.
ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
152
152
153
153
154 def _verify_revision(rl, skipflags, state, node):
154 def _verify_revision(rl, skipflags, state, node):
155 """Verify the integrity of the given revlog ``node`` while providing a hook
155 """Verify the integrity of the given revlog ``node`` while providing a hook
156 point for extensions to influence the operation."""
156 point for extensions to influence the operation."""
157 if skipflags:
157 if skipflags:
158 state[b'skipread'].add(node)
158 state[b'skipread'].add(node)
159 else:
159 else:
160 # Side-effect: read content and verify hash.
160 # Side-effect: read content and verify hash.
161 rl.revision(node)
161 rl.revision(node)
162
162
163
163
# True if a fast implementation for persistent-nodemap is available
#
# Either the Rust revlog module is importable, or the C parsers module
# exposes `BaseIndexObject`.
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance consideration (and a
# wheelbarrow of other slowness source)
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or hasattr(
    parsers, 'BaseIndexObject'
)
172
172
173
173
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta:
    """Value object describing one revision delta.

    Implements the ``repository.irevisiondelta`` interface; see that
    interface for the authoritative description of each field.
    """

    # NOTE: attribute order defines the generated __init__ signature --
    # do not reorder.
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)
188
188
189
189
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem:
    """Immutable record of a problem found while verifying a revlog.

    Implements ``repository.iverifyproblem``; at most one of ``warning``
    and ``error`` is expected to be set, with ``node`` identifying the
    affected revision when known.
    """

    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
196
196
197
197
def parse_index_v1(data, inline):
    """Parse a REVLOGV1 index.

    Delegates to the accelerated parser module and returns its
    ``(index, cache)`` pair unchanged.
    """
    return parsers.parse_index2(data, inline)
202
202
203
203
def parse_index_v2(data, inline):
    """Parse a REVLOGV2 index.

    Delegates to the accelerated parser module with the v2 format flag and
    returns its ``(index, cache)`` pair unchanged.
    """
    return parsers.parse_index2(data, inline, format=REVLOGV2)
208
208
209
209
def parse_index_cl_v2(data, inline):
    """Parse a CHANGELOGV2 index.

    Delegates to the accelerated parser module with the changelog-v2 format
    flag and returns its ``(index, cache)`` pair unchanged.
    """
    return parsers.parse_index2(data, inline, format=CHANGELOGV2)
214
214
215
215
if hasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        """Parse a v1 index with the development nodemap parser."""
        return parsers.parse_index_devel_nodemap(data, inline)


else:
    # The C parsers module does not provide the devel nodemap parser.
    parse_index_v1_nodemap = None
225
225
226
226
def parse_index_v1_mixed(data, inline):
    """Parse a v1 index and wrap it in a Rust ``MixedIndex``."""
    index, cache = parse_index_v1(data, inline)
    mixed = rustrevlog.MixedIndex(index)
    return mixed, cache
230
230
231
231
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF

# Error message used when a revlog read lands outside the available data.
FILE_TOO_SHORT_MSG = _(
    b'cannot read from revlog %s;'
    b' expected %d bytes from offset %d, data size is %d'
)

# Bytes accepted when interpreting a string as a hexadecimal node prefix.
hexdigits = b'0123456789abcdefABCDEF'
242
242
243
243
244 class _Config:
244 class _Config:
245 def copy(self):
245 def copy(self):
246 return self.__class__(**self.__dict__)
246 return self.__class__(**self.__dict__)
247
247
248
248
@attr.s()
class FeatureConfig(_Config):
    """Hold configuration values about the available revlog features"""

    # NOTE: attribute order defines the generated __init__ signature --
    # do not reorder.

    # the default compression engine
    compression_engine = attr.ib(default=b'zlib')
    # compression engines options
    compression_engine_options = attr.ib(default=attr.Factory(dict))

    # can we use censor on this revlog
    censorable = attr.ib(default=False)
    # does this revlog use the "side data" feature
    has_side_data = attr.ib(default=False)
    # might remove rank configuration once the computation has no impact
    compute_rank = attr.ib(default=False)
    # parent order is supposed to be semantically irrelevant, so we
    # normally resort parents to ensure that the first parent is non-null,
    # if there is a non-null parent at all.
    # filelog abuses the parent order as flag to mark some instances of
    # meta-encoded files, so allow it to disable this behavior.
    canonical_parent_order = attr.ib(default=False)
    # can ellipsis commit be used
    enable_ellipsis = attr.ib(default=False)

    def copy(self):
        # Re-copy the options dict so the clone does not share mutable
        # state with this instance.
        new = super().copy()
        new.compression_engine_options = self.compression_engine_options.copy()
        return new
277
277
278
278
@attr.s()
class DataConfig(_Config):
    """Hold configuration value about how the revlog data are read"""

    # NOTE: attribute order defines the generated __init__ signature --
    # do not reorder.

    # should we try to open the "pending" version of the revlog
    try_pending = attr.ib(default=False)
    # should we try to open the "splitted" version of the revlog
    try_split = attr.ib(default=False)
    # When True, indexfile should be opened with checkambig=True at writing,
    # to avoid file stat ambiguity.
    check_ambig = attr.ib(default=False)

    # If true, use mmap instead of reading to deal with large index
    mmap_large_index = attr.ib(default=False)
    # how much data is large (threshold in bytes; None means unset)
    mmap_index_threshold = attr.ib(default=None)
    # How much data to read and cache into the raw revlog data cache.
    chunk_cache_size = attr.ib(default=65536)

    # Allow sparse reading of the revlog data
    with_sparse_read = attr.ib(default=False)
    # minimal density of a sparse read chunk
    sr_density_threshold = attr.ib(default=0.50)
    # minimal size of data we skip when performing sparse read
    sr_min_gap_size = attr.ib(default=262144)

    # are delta encoded against arbitrary bases.
    generaldelta = attr.ib(default=False)
307
307
308
308
@attr.s()
class DeltaConfig(_Config):
    """Hold configuration value about how new delta are computed

    Some attributes are duplicated from DataConfig to help having each object
    self contained.
    """

    # NOTE: attribute order defines the generated __init__ signature --
    # do not reorder.

    # can delta be encoded against arbitrary bases.
    general_delta = attr.ib(default=False)
    # Allow sparse writing of the revlog data
    sparse_revlog = attr.ib(default=False)
    # maximum length of a delta chain (None means unlimited)
    max_chain_len = attr.ib(default=None)
    # Maximum distance between delta chain base start and end
    max_deltachain_span = attr.ib(default=-1)
    # If `upper_bound_comp` is not None, this is the expected maximal gain from
    # compression for the data content.
    upper_bound_comp = attr.ib(default=None)
    # Should we try a delta against both parent
    delta_both_parents = attr.ib(default=True)
    # Test delta base candidate group by chunk of this maximal size.
    candidate_group_chunk_size = attr.ib(default=0)
    # Should we display debug information about delta computation
    debug_delta = attr.ib(default=False)
    # trust incoming delta by default
    lazy_delta = attr.ib(default=True)
    # trust the base of incoming delta by default
    lazy_delta_base = attr.ib(default=False)
338
338
339
339
340 class revlog:
340 class revlog:
341 """
341 """
342 the underlying revision storage object
342 the underlying revision storage object
343
343
344 A revlog consists of two parts, an index and the revision data.
344 A revlog consists of two parts, an index and the revision data.
345
345
346 The index is a file with a fixed record size containing
346 The index is a file with a fixed record size containing
347 information on each revision, including its nodeid (hash), the
347 information on each revision, including its nodeid (hash), the
348 nodeids of its parents, the position and offset of its data within
348 nodeids of its parents, the position and offset of its data within
349 the data file, and the revision it's based on. Finally, each entry
349 the data file, and the revision it's based on. Finally, each entry
350 contains a linkrev entry that can serve as a pointer to external
350 contains a linkrev entry that can serve as a pointer to external
351 data.
351 data.
352
352
353 The revision data itself is a linear collection of data chunks.
353 The revision data itself is a linear collection of data chunks.
354 Each chunk represents a revision and is usually represented as a
354 Each chunk represents a revision and is usually represented as a
355 delta against the previous chunk. To bound lookup time, runs of
355 delta against the previous chunk. To bound lookup time, runs of
356 deltas are limited to about 2 times the length of the original
356 deltas are limited to about 2 times the length of the original
357 version data. This makes retrieval of a version proportional to
357 version data. This makes retrieval of a version proportional to
358 its size, or O(1) relative to the number of revisions.
358 its size, or O(1) relative to the number of revisions.
359
359
360 Both pieces of the revlog are written to in an append-only
360 Both pieces of the revlog are written to in an append-only
361 fashion, which means we never need to rewrite a file to insert or
361 fashion, which means we never need to rewrite a file to insert or
362 remove data, and can use some simple techniques to avoid the need
362 remove data, and can use some simple techniques to avoid the need
363 for locking while reading.
363 for locking while reading.
364
364
365 If checkambig, indexfile is opened with checkambig=True at
365 If checkambig, indexfile is opened with checkambig=True at
366 writing, to avoid file stat ambiguity.
366 writing, to avoid file stat ambiguity.
367
367
368 If mmaplargeindex is True, and an mmapindexthreshold is set, the
368 If mmaplargeindex is True, and an mmapindexthreshold is set, the
369 index will be mmapped rather than read if it is larger than the
369 index will be mmapped rather than read if it is larger than the
370 configured threshold.
370 configured threshold.
371
371
372 If censorable is True, the revlog can have censored revisions.
372 If censorable is True, the revlog can have censored revisions.
373
373
374 If `upperboundcomp` is not None, this is the expected maximal gain from
374 If `upperboundcomp` is not None, this is the expected maximal gain from
375 compression for the data content.
375 compression for the data content.
376
376
377 `concurrencychecker` is an optional function that receives 3 arguments: a
377 `concurrencychecker` is an optional function that receives 3 arguments: a
378 file handle, a filename, and an expected position. It should check whether
378 file handle, a filename, and an expected position. It should check whether
379 the current position in the file handle is valid, and log/warn/fail (by
379 the current position in the file handle is valid, and log/warn/fail (by
380 raising).
380 raising).
381
381
382 See mercurial/revlogutils/contants.py for details about the content of an
382 See mercurial/revlogutils/contants.py for details about the content of an
383 index entry.
383 index entry.
384 """
384 """
385
385
386 _flagserrorclass = error.RevlogError
386 _flagserrorclass = error.RevlogError
387
387
388 @staticmethod
388 @staticmethod
389 def is_inline_index(header_bytes):
389 def is_inline_index(header_bytes):
390 """Determine if a revlog is inline from the initial bytes of the index"""
390 """Determine if a revlog is inline from the initial bytes of the index"""
391 header = INDEX_HEADER.unpack(header_bytes)[0]
391 header = INDEX_HEADER.unpack(header_bytes)[0]
392
392
393 _format_flags = header & ~0xFFFF
393 _format_flags = header & ~0xFFFF
394 _format_version = header & 0xFFFF
394 _format_version = header & 0xFFFF
395
395
396 features = FEATURES_BY_VERSION[_format_version]
396 features = FEATURES_BY_VERSION[_format_version]
397 return features[b'inline'](_format_flags)
397 return features[b'inline'](_format_flags)
398
398
    def __init__(
        self,
        opener,
        target,
        radix,
        postfix=None,  # only exist for `tmpcensored` now
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
        concurrencychecker=None,
        trypending=False,
        try_split=False,
        canonical_parent_order=True,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        `target`: a (KIND, ID) tuple that identify the content stored in
        this revlog. It help the rest of the code to understand what the revlog
        is about without having to resort to heuristic and index filename
        analysis. Note: that this must be reliably be set by normal code, but
        that test, debug, or performance measurement code might not set this to
        accurate value.
        """
        self.upperboundcomp = upperboundcomp

        self.radix = radix

        # File handles/names are resolved lazily by _loadindex() below.
        self._docket_file = None
        self._indexfile = None
        self._datafile = None
        self._sidedatafile = None
        self._nodemap_file = None
        self.postfix = postfix
        self._trypending = trypending
        self._try_split = try_split
        self.opener = opener
        if persistentnodemap:
            self._nodemap_file = nodemaputil.get_nodemap_file(self)

        assert target[0] in ALL_KINDS
        assert len(target) == 2
        self.target = target
        # Configuration objects come from the opener's options when the
        # repository provided them; otherwise fall back to defaults. The
        # explicit constructor arguments below override the copied values.
        if b'feature-config' in self.opener.options:
            self.feature_config = self.opener.options[b'feature-config'].copy()
        else:
            self.feature_config = FeatureConfig()
        self.feature_config.censorable = censorable
        self.feature_config.canonical_parent_order = canonical_parent_order
        if b'data-config' in self.opener.options:
            self.data_config = self.opener.options[b'data-config'].copy()
        else:
            self.data_config = DataConfig()
        self.data_config.check_ambig = checkambig
        self.data_config.mmap_large_index = mmaplargeindex
        if b'delta-config' in self.opener.options:
            self.delta_config = self.opener.options[b'delta-config'].copy()
        else:
            self.delta_config = DeltaConfig()

        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')

        self.index = None
        self._docket = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}

        # other optionnals features

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 3-tuple of file handles being used for active writing.
        self._writinghandles = None
        # prevent nesting of addgroup
        self._adding_group = None

        # Parse the on-disk index (may also set docket/format attributes).
        self._loadindex()

        self._concurrencychecker = concurrencychecker
491
491
492 @property
492 @property
493 def _generaldelta(self):
493 def _generaldelta(self):
494 """temporary compatibility proxy"""
494 """temporary compatibility proxy"""
495 return self.delta_config.general_delta
495 return self.delta_config.general_delta
496
496
497 @property
497 @property
498 def _checkambig(self):
498 def _checkambig(self):
499 """temporary compatibility proxy"""
499 """temporary compatibility proxy"""
500 return self.data_config.check_ambig
500 return self.data_config.check_ambig
501
501
502 @property
502 @property
503 def _mmaplargeindex(self):
503 def _mmaplargeindex(self):
504 """temporary compatibility proxy"""
504 """temporary compatibility proxy"""
505 return self.data_config.mmap_large_index
505 return self.data_config.mmap_large_index
506
506
507 @property
507 @property
508 def _censorable(self):
508 def _censorable(self):
509 """temporary compatibility proxy"""
509 """temporary compatibility proxy"""
510 return self.feature_config.censorable
510 return self.feature_config.censorable
511
511
512 @property
512 @property
513 def _chunkcachesize(self):
513 def _chunkcachesize(self):
514 """temporary compatibility proxy"""
514 """temporary compatibility proxy"""
515 return self.data_config.chunk_cache_size
515 return self.data_config.chunk_cache_size
516
516
517 @property
517 @property
518 def _maxchainlen(self):
518 def _maxchainlen(self):
519 """temporary compatibility proxy"""
519 """temporary compatibility proxy"""
520 return self.delta_config.max_chain_len
520 return self.delta_config.max_chain_len
521
521
522 @property
522 @property
523 def _deltabothparents(self):
523 def _deltabothparents(self):
524 """temporary compatibility proxy"""
524 """temporary compatibility proxy"""
525 return self.delta_config.delta_both_parents
525 return self.delta_config.delta_both_parents
526
526
527 @property
527 @property
528 def _candidate_group_chunk_size(self):
528 def _candidate_group_chunk_size(self):
529 """temporary compatibility proxy"""
529 """temporary compatibility proxy"""
530 return self.delta_config.candidate_group_chunk_size
530 return self.delta_config.candidate_group_chunk_size
531
531
532 @property
532 @property
533 def _debug_delta(self):
533 def _debug_delta(self):
534 """temporary compatibility proxy"""
534 """temporary compatibility proxy"""
535 return self.delta_config.debug_delta
535 return self.delta_config.debug_delta
536
536
537 @property
537 @property
538 def _compengine(self):
538 def _compengine(self):
539 """temporary compatibility proxy"""
539 """temporary compatibility proxy"""
540 return self.feature_config.compression_engine
540 return self.feature_config.compression_engine
541
541
542 @property
542 @property
543 def _compengineopts(self):
543 def _compengineopts(self):
544 """temporary compatibility proxy"""
544 """temporary compatibility proxy"""
545 return self.feature_config.compression_engine_options
545 return self.feature_config.compression_engine_options
546
546
547 @property
547 @property
548 def _maxdeltachainspan(self):
548 def _maxdeltachainspan(self):
549 """temporary compatibility proxy"""
549 """temporary compatibility proxy"""
550 return self.delta_config.max_deltachain_span
550 return self.delta_config.max_deltachain_span
551
551
552 @property
552 @property
553 def _withsparseread(self):
553 def _withsparseread(self):
554 """temporary compatibility proxy"""
554 """temporary compatibility proxy"""
555 return self.data_config.with_sparse_read
555 return self.data_config.with_sparse_read
556
556
557 @property
557 @property
558 def _sparserevlog(self):
558 def _sparserevlog(self):
559 """temporary compatibility proxy"""
559 """temporary compatibility proxy"""
560 return self.delta_config.sparse_revlog
560 return self.delta_config.sparse_revlog
561
561
562 @property
562 @property
563 def hassidedata(self):
563 def hassidedata(self):
564 """temporary compatibility proxy"""
564 """temporary compatibility proxy"""
565 return self.feature_config.has_side_data
565 return self.feature_config.has_side_data
566
566
567 @property
567 @property
568 def _srdensitythreshold(self):
568 def _srdensitythreshold(self):
569 """temporary compatibility proxy"""
569 """temporary compatibility proxy"""
570 return self.data_config.sr_density_threshold
570 return self.data_config.sr_density_threshold
571
571
572 @property
572 @property
573 def _srmingapsize(self):
573 def _srmingapsize(self):
574 """temporary compatibility proxy"""
574 """temporary compatibility proxy"""
575 return self.data_config.sr_min_gap_size
575 return self.data_config.sr_min_gap_size
576
576
577 @property
577 @property
578 def _compute_rank(self):
578 def _compute_rank(self):
579 """temporary compatibility proxy"""
579 """temporary compatibility proxy"""
580 return self.feature_config.compute_rank
580 return self.feature_config.compute_rank
581
581
582 @property
582 @property
583 def canonical_parent_order(self):
583 def canonical_parent_order(self):
584 """temporary compatibility proxy"""
584 """temporary compatibility proxy"""
585 return self.feature_config.canonical_parent_order
585 return self.feature_config.canonical_parent_order
586
586
587 @property
587 @property
588 def _lazydelta(self):
588 def _lazydelta(self):
589 """temporary compatibility proxy"""
589 """temporary compatibility proxy"""
590 return self.delta_config.lazy_delta
590 return self.delta_config.lazy_delta
591
591
592 @property
592 @property
593 def _lazydeltabase(self):
593 def _lazydeltabase(self):
594 """temporary compatibility proxy"""
594 """temporary compatibility proxy"""
595 return self.delta_config.lazy_delta_base
595 return self.delta_config.lazy_delta_base
596
596
    def _init_opts(self):
        """process options (from above/config) to setup associated default revlog mode

        These values might be affected when actually reading on disk information.

        The relevant values are returned for use in _loadindex().

        * newversionflags:
            version header to use if we need to create a new revlog

        * mmapindexthreshold:
            minimal index size for start to use mmap

        * force_nodemap:
            force the usage of a "development" version of the nodemap code
        """
        opts = self.opener.options

        # Pick the header (format version + flags) used if a new revlog has
        # to be created; precedence order here matters.
        if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
            new_header = CHANGELOGV2
            compute_rank = opts.get(b'changelogv2.compute-rank', True)
            self.feature_config.compute_rank = compute_rank
        elif b'revlogv2' in opts:
            new_header = REVLOGV2
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        # mmap of the index is only considered when the configuration opted
        # into mmapping large indexes.
        mmapindexthreshold = None
        if self.data_config.mmap_large_index:
            mmapindexthreshold = self.data_config.mmap_index_threshold
        if self.feature_config.enable_ellipsis:
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).items():
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        # the chunk cache size must be a strictly positive power of two
        chunk_cache_size = self.data_config.chunk_cache_size
        if chunk_cache_size <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % chunk_cache_size
            )
        elif chunk_cache_size & (chunk_cache_size - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % chunk_cache_size
            )
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap
653
653
654 def _get_data(self, filepath, mmap_threshold, size=None):
654 def _get_data(self, filepath, mmap_threshold, size=None):
655 """return a file content with or without mmap
655 """return a file content with or without mmap
656
656
657 If the file is missing return the empty string"""
657 If the file is missing return the empty string"""
658 try:
658 try:
659 with self.opener(filepath) as fp:
659 with self.opener(filepath) as fp:
660 if mmap_threshold is not None:
660 if mmap_threshold is not None:
661 file_size = self.opener.fstat(fp).st_size
661 file_size = self.opener.fstat(fp).st_size
662 if file_size >= mmap_threshold:
662 if file_size >= mmap_threshold:
663 if size is not None:
663 if size is not None:
664 # avoid potentiel mmap crash
664 # avoid potentiel mmap crash
665 size = min(file_size, size)
665 size = min(file_size, size)
666 # TODO: should .close() to release resources without
666 # TODO: should .close() to release resources without
667 # relying on Python GC
667 # relying on Python GC
668 if size is None:
668 if size is None:
669 return util.buffer(util.mmapread(fp))
669 return util.buffer(util.mmapread(fp))
670 else:
670 else:
671 return util.buffer(util.mmapread(fp, size))
671 return util.buffer(util.mmapread(fp, size))
672 if size is None:
672 if size is None:
673 return fp.read()
673 return fp.read()
674 else:
674 else:
675 return fp.read(size)
675 return fp.read(size)
676 except FileNotFoundError:
676 except FileNotFoundError:
677 return b''
677 return b''
678
678
    def get_streams(self, max_linkrev, force_inline=False):
        """return a list of streams that represent this revlog

        This is used by stream-clone to do bytes to bytes copies of a repository.

        This streams data for all revisions that refer to a changelog revision up
        to `max_linkrev`.

        If `force_inline` is set, it enforces that the stream will represent an inline revlog.

        It returns is a list of three-tuple:

        [
            (filename, bytes_stream, stream_size),
            …
        ]
        """
        # find the number of revisions (n) whose linkrev is within bound
        n = len(self)
        index = self.index
        while n > 0:
            linkrev = index[n - 1][4]
            if linkrev < max_linkrev:
                break
            # note: this loop will rarely go through multiple iterations, since
            # it only traverses commits created during the current streaming
            # pull operation.
            #
            # If this become a problem, using a binary search should cap the
            # runtime of this.
            n = n - 1
        if n == 0:
            # no data to send
            return []
        index_size = n * index.entry_size
        data_size = self.end(n - 1)

        # XXX we might have been split (or stripped) since the object
        # initialization, We need to close this race too, but having a way to
        # pre-open the file we feed to the revlog and never closing them before
        # we are done streaming.

        if self._inline:
            # inline revlog: a single file holds both index and data

            def get_stream():
                with self._indexfp() as fp:
                    # the generator is primed with next() right below, so the
                    # file is opened before this method returns
                    yield None
                    size = index_size + data_size
                    if size <= 65536:
                        yield fp.read(size)
                    else:
                        yield from util.filechunkiter(fp, limit=size)

            inline_stream = get_stream()
            next(inline_stream)
            return [
                (self._indexfile, inline_stream, index_size + data_size),
            ]
        elif force_inline:
            # re-serialize a non-inline revlog as an inline stream

            def get_stream():
                with self.reading():
                    # primed with next() below so the revlog is opened for
                    # reading before this method returns
                    yield None

                    for rev in range(n):
                        idx = self.index.entry_binary(rev)
                        if rev == 0 and self._docket is None:
                            # re-inject the inline flag
                            header = self._format_flags
                            header |= self._format_version
                            header |= FLAG_INLINE_DATA
                            header = self.index.pack_header(header)
                            idx = header + idx
                        yield idx
                        yield self._getsegmentforrevs(rev, rev)[1]

            inline_stream = get_stream()
            next(inline_stream)
            return [
                (self._indexfile, inline_stream, index_size + data_size),
            ]
        else:
            # split revlog: stream the data file and the index file separately

            def get_index_stream():
                with self._indexfp() as fp:
                    # primed with next() below; keeps the file open eagerly
                    yield None
                    if index_size <= 65536:
                        yield fp.read(index_size)
                    else:
                        yield from util.filechunkiter(fp, limit=index_size)

            def get_data_stream():
                with self._datafp() as fp:
                    # primed with next() below; keeps the file open eagerly
                    yield None
                    if data_size <= 65536:
                        yield fp.read(data_size)
                    else:
                        yield from util.filechunkiter(fp, limit=data_size)

            index_stream = get_index_stream()
            next(index_stream)
            data_stream = get_data_stream()
            next(data_stream)
            return [
                (self._datafile, data_stream, data_size),
                (self._indexfile, index_stream, index_size),
            ]
785
785
    def _loadindex(self, docket=None):
        """Load the index (and docket, if any) and set up the parsing state.

        When ``docket`` is provided by the caller, it is used directly and
        the entry-point file is not read from disk.
        """

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        # determine which file is the entry point: an explicit postfix,
        # a pending index, a split index, or the plain index file
        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        elif self._try_split and self.opener.exists(self._split_index_file):
            entry_point = self._split_index_file
        else:
            entry_point = b'%s.i' % self.radix

        if docket is not None:
            self._docket = docket
            self._docket_file = entry_point
        else:
            self._initempty = True
            entry_data = self._get_data(entry_point, mmapindexthreshold)
            if len(entry_data) > 0:
                header = INDEX_HEADER.unpack(entry_data[:4])[0]
                self._initempty = False
            else:
                header = new_header

            # header layout: flags in the high 16 bits, version in the low 16
            self._format_flags = header & ~0xFFFF
            self._format_version = header & 0xFFFF

            supported_flags = SUPPORTED_FLAGS.get(self._format_version)
            if supported_flags is None:
                msg = _(b'unknown version (%d) in revlog %s')
                msg %= (self._format_version, self.display_id)
                raise error.RevlogError(msg)
            elif self._format_flags & ~supported_flags:
                msg = _(b'unknown flags (%#04x) in version %d revlog %s')
                display_flag = self._format_flags >> 16
                msg %= (display_flag, self._format_version, self.display_id)
                raise error.RevlogError(msg)

            features = FEATURES_BY_VERSION[self._format_version]
            self._inline = features[b'inline'](self._format_flags)
            self.delta_config.general_delta = features[b'generaldelta'](
                self._format_flags
            )
            self.feature_config.has_side_data = features[b'sidedata']

            if not features[b'docket']:
                # non-docket formats: the entry point is the index itself
                self._indexfile = entry_point
                index_data = entry_data
            else:
                self._docket_file = entry_point
                if self._initempty:
                    self._docket = docketutil.default_docket(self, header)
                else:
                    self._docket = docketutil.parse_docket(
                        self, entry_data, use_pending=self._trypending
                    )

        if self._docket is not None:
            # docket-based revlog: read the index from the file the docket
            # points to, bounded by the size the docket records
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self.delta_config.general_delta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self._docket is not None:
            self._datafile = self._docket.data_filepath()
            self._sidedatafile = self._docket.sidedata_filepath()
        elif self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self.delta_config.general_delta:
            self.delta_config.sparse_revlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        # select the index parser matching the detected on-disk format
        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and hasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index = index
        self._segmentfile = randomaccessfile.randomaccessfile(
            self.opener,
            (self._indexfile if self._inline else self._datafile),
            self.data_config.chunk_cache_size,
            chunkcache,
        )
        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
            self.opener,
            self._sidedatafile,
            self.data_config.chunk_cache_size,
        )
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}
944
944
945 def get_revlog(self):
945 def get_revlog(self):
946 """simple function to mirror API of other not-really-revlog API"""
946 """simple function to mirror API of other not-really-revlog API"""
947 return self
947 return self
948
948
949 @util.propertycache
949 @util.propertycache
950 def revlog_kind(self):
950 def revlog_kind(self):
951 return self.target[0]
951 return self.target[0]
952
952
953 @util.propertycache
953 @util.propertycache
954 def display_id(self):
954 def display_id(self):
955 """The public facing "ID" of the revlog that we use in message"""
955 """The public facing "ID" of the revlog that we use in message"""
956 if self.revlog_kind == KIND_FILELOG:
956 if self.revlog_kind == KIND_FILELOG:
957 # Reference the file without the "data/" prefix, so it is familiar
957 # Reference the file without the "data/" prefix, so it is familiar
958 # to the user.
958 # to the user.
959 return self.target[1]
959 return self.target[1]
960 else:
960 else:
961 return self.radix
961 return self.radix
962
962
963 def _get_decompressor(self, t):
963 def _get_decompressor(self, t):
964 try:
964 try:
965 compressor = self._decompressors[t]
965 compressor = self._decompressors[t]
966 except KeyError:
966 except KeyError:
967 try:
967 try:
968 engine = util.compengines.forrevlogheader(t)
968 engine = util.compengines.forrevlogheader(t)
969 compressor = engine.revlogcompressor(
969 compressor = engine.revlogcompressor(
970 self.feature_config.compression_engine_options
970 self.feature_config.compression_engine_options
971 )
971 )
972 self._decompressors[t] = compressor
972 self._decompressors[t] = compressor
973 except KeyError:
973 except KeyError:
974 raise error.RevlogError(
974 raise error.RevlogError(
975 _(b'unknown compression type %s') % binascii.hexlify(t)
975 _(b'unknown compression type %s') % binascii.hexlify(t)
976 )
976 )
977 return compressor
977 return compressor
978
978
979 @util.propertycache
979 @util.propertycache
980 def _compressor(self):
980 def _compressor(self):
981 engine = util.compengines[self.feature_config.compression_engine]
981 engine = util.compengines[self.feature_config.compression_engine]
982 return engine.revlogcompressor(
982 return engine.revlogcompressor(
983 self.feature_config.compression_engine_options
983 self.feature_config.compression_engine_options
984 )
984 )
985
985
986 @util.propertycache
986 @util.propertycache
987 def _decompressor(self):
987 def _decompressor(self):
988 """the default decompressor"""
988 """the default decompressor"""
989 if self._docket is None:
989 if self._docket is None:
990 return None
990 return None
991 t = self._docket.default_compression_header
991 t = self._docket.default_compression_header
992 c = self._get_decompressor(t)
992 c = self._get_decompressor(t)
993 return c.decompress
993 return c.decompress
994
994
995 def _indexfp(self):
995 def _indexfp(self):
996 """file object for the revlog's index file"""
996 """file object for the revlog's index file"""
997 return self.opener(self._indexfile, mode=b"r")
997 return self.opener(self._indexfile, mode=b"r")
998
998
999 def __index_write_fp(self):
999 def __index_write_fp(self):
1000 # You should not use this directly and use `_writing` instead
1000 # You should not use this directly and use `_writing` instead
1001 try:
1001 try:
1002 f = self.opener(
1002 f = self.opener(
1003 self._indexfile,
1003 self._indexfile,
1004 mode=b"r+",
1004 mode=b"r+",
1005 checkambig=self.data_config.check_ambig,
1005 checkambig=self.data_config.check_ambig,
1006 )
1006 )
1007 if self._docket is None:
1007 if self._docket is None:
1008 f.seek(0, os.SEEK_END)
1008 f.seek(0, os.SEEK_END)
1009 else:
1009 else:
1010 f.seek(self._docket.index_end, os.SEEK_SET)
1010 f.seek(self._docket.index_end, os.SEEK_SET)
1011 return f
1011 return f
1012 except FileNotFoundError:
1012 except FileNotFoundError:
1013 return self.opener(
1013 return self.opener(
1014 self._indexfile,
1014 self._indexfile,
1015 mode=b"w+",
1015 mode=b"w+",
1016 checkambig=self.data_config.check_ambig,
1016 checkambig=self.data_config.check_ambig,
1017 )
1017 )
1018
1018
1019 def __index_new_fp(self):
1019 def __index_new_fp(self):
1020 # You should not use this unless you are upgrading from inline revlog
1020 # You should not use this unless you are upgrading from inline revlog
1021 return self.opener(
1021 return self.opener(
1022 self._indexfile,
1022 self._indexfile,
1023 mode=b"w",
1023 mode=b"w",
1024 checkambig=self.data_config.check_ambig,
1024 checkambig=self.data_config.check_ambig,
1025 atomictemp=True,
1025 atomictemp=True,
1026 )
1026 )
1027
1027
1028 def _datafp(self, mode=b'r'):
1028 def _datafp(self, mode=b'r'):
1029 """file object for the revlog's data file"""
1029 """file object for the revlog's data file"""
1030 return self.opener(self._datafile, mode=mode)
1030 return self.opener(self._datafile, mode=mode)
1031
1031
1032 @contextlib.contextmanager
1032 @contextlib.contextmanager
1033 def _sidedatareadfp(self):
1033 def _sidedatareadfp(self):
1034 """file object suitable to read sidedata"""
1034 """file object suitable to read sidedata"""
1035 if self._writinghandles:
1035 if self._writinghandles:
1036 yield self._writinghandles[2]
1036 yield self._writinghandles[2]
1037 else:
1037 else:
1038 with self.opener(self._sidedatafile) as fp:
1038 with self.opener(self._sidedatafile) as fp:
1039 yield fp
1039 yield fp
1040
1040
1041 def tiprev(self):
1041 def tiprev(self):
1042 return len(self.index) - 1
1042 return len(self.index) - 1
1043
1043
1044 def tip(self):
1044 def tip(self):
1045 return self.node(self.tiprev())
1045 return self.node(self.tiprev())
1046
1046
1047 def __contains__(self, rev):
1047 def __contains__(self, rev):
1048 return 0 <= rev < len(self)
1048 return 0 <= rev < len(self)
1049
1049
1050 def __len__(self):
1050 def __len__(self):
1051 return len(self.index)
1051 return len(self.index)
1052
1052
1053 def __iter__(self):
1053 def __iter__(self):
1054 return iter(range(len(self)))
1054 return iter(range(len(self)))
1055
1055
1056 def revs(self, start=0, stop=None):
1056 def revs(self, start=0, stop=None):
1057 """iterate over all rev in this revlog (from start to stop)"""
1057 """iterate over all rev in this revlog (from start to stop)"""
1058 return storageutil.iterrevs(len(self), start=start, stop=stop)
1058 return storageutil.iterrevs(len(self), start=start, stop=stop)
1059
1059
1060 def hasnode(self, node):
1060 def hasnode(self, node):
1061 try:
1061 try:
1062 self.rev(node)
1062 self.rev(node)
1063 return True
1063 return True
1064 except KeyError:
1064 except KeyError:
1065 return False
1065 return False
1066
1066
1067 def _candelta(self, baserev, rev):
1067 def _candelta(self, baserev, rev):
1068 """whether two revisions (baserev, rev) can be delta-ed or not"""
1068 """whether two revisions (baserev, rev) can be delta-ed or not"""
1069 # Disable delta if either rev requires a content-changing flag
1069 # Disable delta if either rev requires a content-changing flag
1070 # processor (ex. LFS). This is because such flag processor can alter
1070 # processor (ex. LFS). This is because such flag processor can alter
1071 # the rawtext content that the delta will be based on, and two clients
1071 # the rawtext content that the delta will be based on, and two clients
1072 # could have a same revlog node with different flags (i.e. different
1072 # could have a same revlog node with different flags (i.e. different
1073 # rawtext contents) and the delta could be incompatible.
1073 # rawtext contents) and the delta could be incompatible.
1074 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
1074 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
1075 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
1075 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
1076 ):
1076 ):
1077 return False
1077 return False
1078 return True
1078 return True
1079
1079
    def update_caches(self, transaction):
        """update on disk cache

        If a transaction is passed, the update may be delayed to transaction
        commit."""
        # Only the persistent nodemap has an on-disk cache to maintain.
        if self._nodemap_file is not None:
            if transaction is None:
                # no transaction: write the nodemap out immediately
                nodemaputil.update_persistent_nodemap(self)
            else:
                # defer the write so it happens at transaction commit
                nodemaputil.setup_persistent_nodemap(transaction, self)
1090
1090
    def clearcaches(self):
        """Clear in-memory caches"""
        # drop the cached revision payload and the per-revision caches
        self._revisioncache = None
        self._chainbasecache.clear()
        # cached raw segments of the data and sidedata files
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()
        # partial node-id lookup cache
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket, we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and hasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            # re-feed the persisted nodemap into the freshly cleared index
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)
1112
1112
    def rev(self, node):
        """return the revision number associated with a <nodeid>"""
        try:
            return self.index.rev(node)
        except TypeError:
            # a non-bytes key is a programming error: propagate unchanged
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                # the working-directory pseudo node has no revision number
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))
1127
1127
1128 # Accessors for index entries.
1128 # Accessors for index entries.
1129
1129
1130 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
1130 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
1131 # are flags.
1131 # are flags.
1132 def start(self, rev):
1132 def start(self, rev):
1133 return int(self.index[rev][0] >> 16)
1133 return int(self.index[rev][0] >> 16)
1134
1134
1135 def sidedata_cut_off(self, rev):
1135 def sidedata_cut_off(self, rev):
1136 sd_cut_off = self.index[rev][8]
1136 sd_cut_off = self.index[rev][8]
1137 if sd_cut_off != 0:
1137 if sd_cut_off != 0:
1138 return sd_cut_off
1138 return sd_cut_off
1139 # This is some annoying dance, because entries without sidedata
1139 # This is some annoying dance, because entries without sidedata
1140 # currently use 0 as their ofsset. (instead of previous-offset +
1140 # currently use 0 as their ofsset. (instead of previous-offset +
1141 # previous-size)
1141 # previous-size)
1142 #
1142 #
1143 # We should reconsider this sidedata β†’ 0 sidata_offset policy.
1143 # We should reconsider this sidedata β†’ 0 sidata_offset policy.
1144 # In the meantime, we need this.
1144 # In the meantime, we need this.
1145 while 0 <= rev:
1145 while 0 <= rev:
1146 e = self.index[rev]
1146 e = self.index[rev]
1147 if e[9] != 0:
1147 if e[9] != 0:
1148 return e[8] + e[9]
1148 return e[8] + e[9]
1149 rev -= 1
1149 rev -= 1
1150 return 0
1150 return 0
1151
1151
1152 def flags(self, rev):
1152 def flags(self, rev):
1153 return self.index[rev][0] & 0xFFFF
1153 return self.index[rev][0] & 0xFFFF
1154
1154
1155 def length(self, rev):
1155 def length(self, rev):
1156 return self.index[rev][1]
1156 return self.index[rev][1]
1157
1157
1158 def sidedata_length(self, rev):
1158 def sidedata_length(self, rev):
1159 if not self.hassidedata:
1159 if not self.hassidedata:
1160 return 0
1160 return 0
1161 return self.index[rev][9]
1161 return self.index[rev][9]
1162
1162
1163 def rawsize(self, rev):
1163 def rawsize(self, rev):
1164 """return the length of the uncompressed text for a given revision"""
1164 """return the length of the uncompressed text for a given revision"""
1165 l = self.index[rev][2]
1165 l = self.index[rev][2]
1166 if l >= 0:
1166 if l >= 0:
1167 return l
1167 return l
1168
1168
1169 t = self.rawdata(rev)
1169 t = self.rawdata(rev)
1170 return len(t)
1170 return len(t)
1171
1171
1172 def size(self, rev):
1172 def size(self, rev):
1173 """length of non-raw text (processed by a "read" flag processor)"""
1173 """length of non-raw text (processed by a "read" flag processor)"""
1174 # fast path: if no "read" flag processor could change the content,
1174 # fast path: if no "read" flag processor could change the content,
1175 # size is rawsize. note: ELLIPSIS is known to not change the content.
1175 # size is rawsize. note: ELLIPSIS is known to not change the content.
1176 flags = self.flags(rev)
1176 flags = self.flags(rev)
1177 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1177 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1178 return self.rawsize(rev)
1178 return self.rawsize(rev)
1179
1179
1180 return len(self.revision(rev))
1180 return len(self.revision(rev))
1181
1181
    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
        """
        rank = self.index[rev][ENTRY_RANK]
        # only changelog-v2 persists ranks; elsewhere, or when the stored
        # value is the "unknown" sentinel, report None
        if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
            return None
        if rev == nullrev:
            return 0  # convention
        return rank
1199
1199
1200 def chainbase(self, rev):
1200 def chainbase(self, rev):
1201 base = self._chainbasecache.get(rev)
1201 base = self._chainbasecache.get(rev)
1202 if base is not None:
1202 if base is not None:
1203 return base
1203 return base
1204
1204
1205 index = self.index
1205 index = self.index
1206 iterrev = rev
1206 iterrev = rev
1207 base = index[iterrev][3]
1207 base = index[iterrev][3]
1208 while base != iterrev:
1208 while base != iterrev:
1209 iterrev = base
1209 iterrev = base
1210 base = index[iterrev][3]
1210 base = index[iterrev][3]
1211
1211
1212 self._chainbasecache[rev] = base
1212 self._chainbasecache[rev] = base
1213 return base
1213 return base
1214
1214
1215 def linkrev(self, rev):
1215 def linkrev(self, rev):
1216 return self.index[rev][4]
1216 return self.index[rev][4]
1217
1217
1218 def parentrevs(self, rev):
1218 def parentrevs(self, rev):
1219 try:
1219 try:
1220 entry = self.index[rev]
1220 entry = self.index[rev]
1221 except IndexError:
1221 except IndexError:
1222 if rev == wdirrev:
1222 if rev == wdirrev:
1223 raise error.WdirUnsupported
1223 raise error.WdirUnsupported
1224 raise
1224 raise
1225
1225
1226 if self.canonical_parent_order and entry[5] == nullrev:
1226 if self.canonical_parent_order and entry[5] == nullrev:
1227 return entry[6], entry[5]
1227 return entry[6], entry[5]
1228 else:
1228 else:
1229 return entry[5], entry[6]
1229 return entry[5], entry[6]
1230
1230
1231 # fast parentrevs(rev) where rev isn't filtered
1231 # fast parentrevs(rev) where rev isn't filtered
1232 _uncheckedparentrevs = parentrevs
1232 _uncheckedparentrevs = parentrevs
1233
1233
1234 def node(self, rev):
1234 def node(self, rev):
1235 try:
1235 try:
1236 return self.index[rev][7]
1236 return self.index[rev][7]
1237 except IndexError:
1237 except IndexError:
1238 if rev == wdirrev:
1238 if rev == wdirrev:
1239 raise error.WdirUnsupported
1239 raise error.WdirUnsupported
1240 raise
1240 raise
1241
1241
1242 # Derived from index values.
1242 # Derived from index values.
1243
1243
1244 def end(self, rev):
1244 def end(self, rev):
1245 return self.start(rev) + self.length(rev)
1245 return self.start(rev) + self.length(rev)
1246
1246
1247 def parents(self, node):
1247 def parents(self, node):
1248 i = self.index
1248 i = self.index
1249 d = i[self.rev(node)]
1249 d = i[self.rev(node)]
1250 # inline node() to avoid function call overhead
1250 # inline node() to avoid function call overhead
1251 if self.canonical_parent_order and d[5] == self.nullid:
1251 if self.canonical_parent_order and d[5] == self.nullid:
1252 return i[d[6]][7], i[d[5]][7]
1252 return i[d[6]][7], i[d[5]][7]
1253 else:
1253 else:
1254 return i[d[5]][7], i[d[6]][7]
1254 return i[d[5]][7], i[d[6]][7]
1255
1255
1256 def chainlen(self, rev):
1256 def chainlen(self, rev):
1257 return self._chaininfo(rev)[0]
1257 return self._chaininfo(rev)[0]
1258
1258
    def _chaininfo(self, rev):
        """Return ``(chain length, compressed delta size)`` for ``rev``.

        Results are memoized in ``self._chaininfocache``.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self.delta_config.general_delta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # walk towards the chain base (an entry whose base is itself),
        # accumulating chain length and on-disk size of each delta
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                # general delta: the delta base is stored in the entry
                iterrev = e[3]
            else:
                # legacy layout: each rev deltas against its predecessor
                iterrev -= 1
            if iterrev in chaininfocache:
                # reuse a previously computed suffix of this chain
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
1289
1289
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        generaldelta = self.delta_config.general_delta
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, generaldelta)
        except AttributeError:
            # this index implementation has no C helper: fall back to Python
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index

        iterrev = rev
        e = index[iterrev]
        # walk towards the chain base (entry that is its own base) or stoprev
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                # general delta: follow the explicit base pointer
                iterrev = e[3]
            else:
                # legacy layout: the delta parent is the previous revision
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            # reached the chain base; it belongs to the chain as well
            chain.append(iterrev)
            stopped = False

        # the chain was collected base-last; callers expect ascending order
        chain.reverse()
        return chain, stopped
1330
1330
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None and self.index.rust_ext_compat:
            # fast path: the Rust implementation walks the index directly
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            # pure Python fallback driven by the parentrevs function
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1351
1351
1352 def descendants(self, revs):
1352 def descendants(self, revs):
1353 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1353 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1354
1354
1355 def findcommonmissing(self, common=None, heads=None):
1355 def findcommonmissing(self, common=None, heads=None):
1356 """Return a tuple of the ancestors of common and the ancestors of heads
1356 """Return a tuple of the ancestors of common and the ancestors of heads
1357 that are not ancestors of common. In revset terminology, we return the
1357 that are not ancestors of common. In revset terminology, we return the
1358 tuple:
1358 tuple:
1359
1359
1360 ::common, (::heads) - (::common)
1360 ::common, (::heads) - (::common)
1361
1361
1362 The list is sorted by revision number, meaning it is
1362 The list is sorted by revision number, meaning it is
1363 topologically sorted.
1363 topologically sorted.
1364
1364
1365 'heads' and 'common' are both lists of node IDs. If heads is
1365 'heads' and 'common' are both lists of node IDs. If heads is
1366 not supplied, uses all of the revlog's heads. If common is not
1366 not supplied, uses all of the revlog's heads. If common is not
1367 supplied, uses nullid."""
1367 supplied, uses nullid."""
1368 if common is None:
1368 if common is None:
1369 common = [self.nullid]
1369 common = [self.nullid]
1370 if heads is None:
1370 if heads is None:
1371 heads = self.heads()
1371 heads = self.heads()
1372
1372
1373 common = [self.rev(n) for n in common]
1373 common = [self.rev(n) for n in common]
1374 heads = [self.rev(n) for n in heads]
1374 heads = [self.rev(n) for n in heads]
1375
1375
1376 # we want the ancestors, but inclusive
1376 # we want the ancestors, but inclusive
1377 class lazyset:
1377 class lazyset:
1378 def __init__(self, lazyvalues):
1378 def __init__(self, lazyvalues):
1379 self.addedvalues = set()
1379 self.addedvalues = set()
1380 self.lazyvalues = lazyvalues
1380 self.lazyvalues = lazyvalues
1381
1381
1382 def __contains__(self, value):
1382 def __contains__(self, value):
1383 return value in self.addedvalues or value in self.lazyvalues
1383 return value in self.addedvalues or value in self.lazyvalues
1384
1384
1385 def __iter__(self):
1385 def __iter__(self):
1386 added = self.addedvalues
1386 added = self.addedvalues
1387 for r in added:
1387 for r in added:
1388 yield r
1388 yield r
1389 for r in self.lazyvalues:
1389 for r in self.lazyvalues:
1390 if not r in added:
1390 if not r in added:
1391 yield r
1391 yield r
1392
1392
1393 def add(self, value):
1393 def add(self, value):
1394 self.addedvalues.add(value)
1394 self.addedvalues.add(value)
1395
1395
1396 def update(self, values):
1396 def update(self, values):
1397 self.addedvalues.update(values)
1397 self.addedvalues.update(values)
1398
1398
1399 has = lazyset(self.ancestors(common))
1399 has = lazyset(self.ancestors(common))
1400 has.add(nullrev)
1400 has.add(nullrev)
1401 has.update(common)
1401 has.update(common)
1402
1402
1403 # take all ancestors from heads that aren't in has
1403 # take all ancestors from heads that aren't in has
1404 missing = set()
1404 missing = set()
1405 visit = collections.deque(r for r in heads if r not in has)
1405 visit = collections.deque(r for r in heads if r not in has)
1406 while visit:
1406 while visit:
1407 r = visit.popleft()
1407 r = visit.popleft()
1408 if r in missing:
1408 if r in missing:
1409 continue
1409 continue
1410 else:
1410 else:
1411 missing.add(r)
1411 missing.add(r)
1412 for p in self.parentrevs(r):
1412 for p in self.parentrevs(r):
1413 if p not in has:
1413 if p not in has:
1414 visit.append(p)
1414 visit.append(p)
1415 missing = list(missing)
1415 missing = list(missing)
1416 missing.sort()
1416 missing.sort()
1417 return has, [self.node(miss) for miss in missing]
1417 return has, [self.node(miss) for miss in missing]
1418
1418
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None and self.index.rust_ext_compat:
            # fast path: Rust implementation operating on the raw index
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)
1434
1434
1435 def findmissingrevs(self, common=None, heads=None):
1435 def findmissingrevs(self, common=None, heads=None):
1436 """Return the revision numbers of the ancestors of heads that
1436 """Return the revision numbers of the ancestors of heads that
1437 are not ancestors of common.
1437 are not ancestors of common.
1438
1438
1439 More specifically, return a list of revision numbers corresponding to
1439 More specifically, return a list of revision numbers corresponding to
1440 nodes N such that every N satisfies the following constraints:
1440 nodes N such that every N satisfies the following constraints:
1441
1441
1442 1. N is an ancestor of some node in 'heads'
1442 1. N is an ancestor of some node in 'heads'
1443 2. N is not an ancestor of any node in 'common'
1443 2. N is not an ancestor of any node in 'common'
1444
1444
1445 The list is sorted by revision number, meaning it is
1445 The list is sorted by revision number, meaning it is
1446 topologically sorted.
1446 topologically sorted.
1447
1447
1448 'heads' and 'common' are both lists of revision numbers. If heads is
1448 'heads' and 'common' are both lists of revision numbers. If heads is
1449 not supplied, uses all of the revlog's heads. If common is not
1449 not supplied, uses all of the revlog's heads. If common is not
1450 supplied, uses nullid."""
1450 supplied, uses nullid."""
1451 if common is None:
1451 if common is None:
1452 common = [nullrev]
1452 common = [nullrev]
1453 if heads is None:
1453 if heads is None:
1454 heads = self.headrevs()
1454 heads = self.headrevs()
1455
1455
1456 inc = self.incrementalmissingrevs(common=common)
1456 inc = self.incrementalmissingrevs(common=common)
1457 return inc.missingancestors(heads)
1457 return inc.missingancestors(heads)
1458
1458
1459 def findmissing(self, common=None, heads=None):
1459 def findmissing(self, common=None, heads=None):
1460 """Return the ancestors of heads that are not ancestors of common.
1460 """Return the ancestors of heads that are not ancestors of common.
1461
1461
1462 More specifically, return a list of nodes N such that every N
1462 More specifically, return a list of nodes N such that every N
1463 satisfies the following constraints:
1463 satisfies the following constraints:
1464
1464
1465 1. N is an ancestor of some node in 'heads'
1465 1. N is an ancestor of some node in 'heads'
1466 2. N is not an ancestor of any node in 'common'
1466 2. N is not an ancestor of any node in 'common'
1467
1467
1468 The list is sorted by revision number, meaning it is
1468 The list is sorted by revision number, meaning it is
1469 topologically sorted.
1469 topologically sorted.
1470
1470
1471 'heads' and 'common' are both lists of node IDs. If heads is
1471 'heads' and 'common' are both lists of node IDs. If heads is
1472 not supplied, uses all of the revlog's heads. If common is not
1472 not supplied, uses all of the revlog's heads. If common is not
1473 supplied, uses nullid."""
1473 supplied, uses nullid."""
1474 if common is None:
1474 if common is None:
1475 common = [self.nullid]
1475 common = [self.nullid]
1476 if heads is None:
1476 if heads is None:
1477 heads = self.heads()
1477 heads = self.heads()
1478
1478
1479 common = [self.rev(n) for n in common]
1479 common = [self.rev(n) for n in common]
1480 heads = [self.rev(n) for n in heads]
1480 heads = [self.rev(n) for n in heads]
1481
1481
1482 inc = self.incrementalmissingrevs(common=common)
1482 inc = self.incrementalmissingrevs(common=common)
1483 return [self.node(r) for r in inc.missingancestors(heads)]
1483 return [self.node(r) for r in inc.missingancestors(heads)]
1484
1484
1485 def nodesbetween(self, roots=None, heads=None):
1485 def nodesbetween(self, roots=None, heads=None):
1486 """Return a topological path from 'roots' to 'heads'.
1486 """Return a topological path from 'roots' to 'heads'.
1487
1487
1488 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1488 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1489 topologically sorted list of all nodes N that satisfy both of
1489 topologically sorted list of all nodes N that satisfy both of
1490 these constraints:
1490 these constraints:
1491
1491
1492 1. N is a descendant of some node in 'roots'
1492 1. N is a descendant of some node in 'roots'
1493 2. N is an ancestor of some node in 'heads'
1493 2. N is an ancestor of some node in 'heads'
1494
1494
1495 Every node is considered to be both a descendant and an ancestor
1495 Every node is considered to be both a descendant and an ancestor
1496 of itself, so every reachable node in 'roots' and 'heads' will be
1496 of itself, so every reachable node in 'roots' and 'heads' will be
1497 included in 'nodes'.
1497 included in 'nodes'.
1498
1498
1499 'outroots' is the list of reachable nodes in 'roots', i.e., the
1499 'outroots' is the list of reachable nodes in 'roots', i.e., the
1500 subset of 'roots' that is returned in 'nodes'. Likewise,
1500 subset of 'roots' that is returned in 'nodes'. Likewise,
1501 'outheads' is the subset of 'heads' that is also in 'nodes'.
1501 'outheads' is the subset of 'heads' that is also in 'nodes'.
1502
1502
1503 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1503 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1504 unspecified, uses nullid as the only root. If 'heads' is
1504 unspecified, uses nullid as the only root. If 'heads' is
1505 unspecified, uses list of all of the revlog's heads."""
1505 unspecified, uses list of all of the revlog's heads."""
1506 nonodes = ([], [], [])
1506 nonodes = ([], [], [])
1507 if roots is not None:
1507 if roots is not None:
1508 roots = list(roots)
1508 roots = list(roots)
1509 if not roots:
1509 if not roots:
1510 return nonodes
1510 return nonodes
1511 lowestrev = min([self.rev(n) for n in roots])
1511 lowestrev = min([self.rev(n) for n in roots])
1512 else:
1512 else:
1513 roots = [self.nullid] # Everybody's a descendant of nullid
1513 roots = [self.nullid] # Everybody's a descendant of nullid
1514 lowestrev = nullrev
1514 lowestrev = nullrev
1515 if (lowestrev == nullrev) and (heads is None):
1515 if (lowestrev == nullrev) and (heads is None):
1516 # We want _all_ the nodes!
1516 # We want _all_ the nodes!
1517 return (
1517 return (
1518 [self.node(r) for r in self],
1518 [self.node(r) for r in self],
1519 [self.nullid],
1519 [self.nullid],
1520 list(self.heads()),
1520 list(self.heads()),
1521 )
1521 )
1522 if heads is None:
1522 if heads is None:
1523 # All nodes are ancestors, so the latest ancestor is the last
1523 # All nodes are ancestors, so the latest ancestor is the last
1524 # node.
1524 # node.
1525 highestrev = len(self) - 1
1525 highestrev = len(self) - 1
1526 # Set ancestors to None to signal that every node is an ancestor.
1526 # Set ancestors to None to signal that every node is an ancestor.
1527 ancestors = None
1527 ancestors = None
1528 # Set heads to an empty dictionary for later discovery of heads
1528 # Set heads to an empty dictionary for later discovery of heads
1529 heads = {}
1529 heads = {}
1530 else:
1530 else:
1531 heads = list(heads)
1531 heads = list(heads)
1532 if not heads:
1532 if not heads:
1533 return nonodes
1533 return nonodes
1534 ancestors = set()
1534 ancestors = set()
1535 # Turn heads into a dictionary so we can remove 'fake' heads.
1535 # Turn heads into a dictionary so we can remove 'fake' heads.
1536 # Also, later we will be using it to filter out the heads we can't
1536 # Also, later we will be using it to filter out the heads we can't
1537 # find from roots.
1537 # find from roots.
1538 heads = dict.fromkeys(heads, False)
1538 heads = dict.fromkeys(heads, False)
1539 # Start at the top and keep marking parents until we're done.
1539 # Start at the top and keep marking parents until we're done.
1540 nodestotag = set(heads)
1540 nodestotag = set(heads)
1541 # Remember where the top was so we can use it as a limit later.
1541 # Remember where the top was so we can use it as a limit later.
1542 highestrev = max([self.rev(n) for n in nodestotag])
1542 highestrev = max([self.rev(n) for n in nodestotag])
1543 while nodestotag:
1543 while nodestotag:
1544 # grab a node to tag
1544 # grab a node to tag
1545 n = nodestotag.pop()
1545 n = nodestotag.pop()
1546 # Never tag nullid
1546 # Never tag nullid
1547 if n == self.nullid:
1547 if n == self.nullid:
1548 continue
1548 continue
1549 # A node's revision number represents its place in a
1549 # A node's revision number represents its place in a
1550 # topologically sorted list of nodes.
1550 # topologically sorted list of nodes.
1551 r = self.rev(n)
1551 r = self.rev(n)
1552 if r >= lowestrev:
1552 if r >= lowestrev:
1553 if n not in ancestors:
1553 if n not in ancestors:
1554 # If we are possibly a descendant of one of the roots
1554 # If we are possibly a descendant of one of the roots
1555 # and we haven't already been marked as an ancestor
1555 # and we haven't already been marked as an ancestor
1556 ancestors.add(n) # Mark as ancestor
1556 ancestors.add(n) # Mark as ancestor
1557 # Add non-nullid parents to list of nodes to tag.
1557 # Add non-nullid parents to list of nodes to tag.
1558 nodestotag.update(
1558 nodestotag.update(
1559 [p for p in self.parents(n) if p != self.nullid]
1559 [p for p in self.parents(n) if p != self.nullid]
1560 )
1560 )
1561 elif n in heads: # We've seen it before, is it a fake head?
1561 elif n in heads: # We've seen it before, is it a fake head?
1562 # So it is, real heads should not be the ancestors of
1562 # So it is, real heads should not be the ancestors of
1563 # any other heads.
1563 # any other heads.
1564 heads.pop(n)
1564 heads.pop(n)
1565 if not ancestors:
1565 if not ancestors:
1566 return nonodes
1566 return nonodes
1567 # Now that we have our set of ancestors, we want to remove any
1567 # Now that we have our set of ancestors, we want to remove any
1568 # roots that are not ancestors.
1568 # roots that are not ancestors.
1569
1569
1570 # If one of the roots was nullid, everything is included anyway.
1570 # If one of the roots was nullid, everything is included anyway.
1571 if lowestrev > nullrev:
1571 if lowestrev > nullrev:
1572 # But, since we weren't, let's recompute the lowest rev to not
1572 # But, since we weren't, let's recompute the lowest rev to not
1573 # include roots that aren't ancestors.
1573 # include roots that aren't ancestors.
1574
1574
1575 # Filter out roots that aren't ancestors of heads
1575 # Filter out roots that aren't ancestors of heads
1576 roots = [root for root in roots if root in ancestors]
1576 roots = [root for root in roots if root in ancestors]
1577 # Recompute the lowest revision
1577 # Recompute the lowest revision
1578 if roots:
1578 if roots:
1579 lowestrev = min([self.rev(root) for root in roots])
1579 lowestrev = min([self.rev(root) for root in roots])
1580 else:
1580 else:
1581 # No more roots? Return empty list
1581 # No more roots? Return empty list
1582 return nonodes
1582 return nonodes
1583 else:
1583 else:
1584 # We are descending from nullid, and don't need to care about
1584 # We are descending from nullid, and don't need to care about
1585 # any other roots.
1585 # any other roots.
1586 lowestrev = nullrev
1586 lowestrev = nullrev
1587 roots = [self.nullid]
1587 roots = [self.nullid]
1588 # Transform our roots list into a set.
1588 # Transform our roots list into a set.
1589 descendants = set(roots)
1589 descendants = set(roots)
1590 # Also, keep the original roots so we can filter out roots that aren't
1590 # Also, keep the original roots so we can filter out roots that aren't
1591 # 'real' roots (i.e. are descended from other roots).
1591 # 'real' roots (i.e. are descended from other roots).
1592 roots = descendants.copy()
1592 roots = descendants.copy()
1593 # Our topologically sorted list of output nodes.
1593 # Our topologically sorted list of output nodes.
1594 orderedout = []
1594 orderedout = []
1595 # Don't start at nullid since we don't want nullid in our output list,
1595 # Don't start at nullid since we don't want nullid in our output list,
1596 # and if nullid shows up in descendants, empty parents will look like
1596 # and if nullid shows up in descendants, empty parents will look like
1597 # they're descendants.
1597 # they're descendants.
1598 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1598 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1599 n = self.node(r)
1599 n = self.node(r)
1600 isdescendant = False
1600 isdescendant = False
1601 if lowestrev == nullrev: # Everybody is a descendant of nullid
1601 if lowestrev == nullrev: # Everybody is a descendant of nullid
1602 isdescendant = True
1602 isdescendant = True
1603 elif n in descendants:
1603 elif n in descendants:
1604 # n is already a descendant
1604 # n is already a descendant
1605 isdescendant = True
1605 isdescendant = True
1606 # This check only needs to be done here because all the roots
1606 # This check only needs to be done here because all the roots
1607 # will start being marked is descendants before the loop.
1607 # will start being marked is descendants before the loop.
1608 if n in roots:
1608 if n in roots:
1609 # If n was a root, check if it's a 'real' root.
1609 # If n was a root, check if it's a 'real' root.
1610 p = tuple(self.parents(n))
1610 p = tuple(self.parents(n))
1611 # If any of its parents are descendants, it's not a root.
1611 # If any of its parents are descendants, it's not a root.
1612 if (p[0] in descendants) or (p[1] in descendants):
1612 if (p[0] in descendants) or (p[1] in descendants):
1613 roots.remove(n)
1613 roots.remove(n)
1614 else:
1614 else:
1615 p = tuple(self.parents(n))
1615 p = tuple(self.parents(n))
1616 # A node is a descendant if either of its parents are
1616 # A node is a descendant if either of its parents are
1617 # descendants. (We seeded the dependents list with the roots
1617 # descendants. (We seeded the dependents list with the roots
1618 # up there, remember?)
1618 # up there, remember?)
1619 if (p[0] in descendants) or (p[1] in descendants):
1619 if (p[0] in descendants) or (p[1] in descendants):
1620 descendants.add(n)
1620 descendants.add(n)
1621 isdescendant = True
1621 isdescendant = True
1622 if isdescendant and ((ancestors is None) or (n in ancestors)):
1622 if isdescendant and ((ancestors is None) or (n in ancestors)):
1623 # Only include nodes that are both descendants and ancestors.
1623 # Only include nodes that are both descendants and ancestors.
1624 orderedout.append(n)
1624 orderedout.append(n)
1625 if (ancestors is not None) and (n in heads):
1625 if (ancestors is not None) and (n in heads):
1626 # We're trying to figure out which heads are reachable
1626 # We're trying to figure out which heads are reachable
1627 # from roots.
1627 # from roots.
1628 # Mark this head as having been reached
1628 # Mark this head as having been reached
1629 heads[n] = True
1629 heads[n] = True
1630 elif ancestors is None:
1630 elif ancestors is None:
1631 # Otherwise, we're trying to discover the heads.
1631 # Otherwise, we're trying to discover the heads.
1632 # Assume this is a head because if it isn't, the next step
1632 # Assume this is a head because if it isn't, the next step
1633 # will eventually remove it.
1633 # will eventually remove it.
1634 heads[n] = True
1634 heads[n] = True
1635 # But, obviously its parents aren't.
1635 # But, obviously its parents aren't.
1636 for p in self.parents(n):
1636 for p in self.parents(n):
1637 heads.pop(p, None)
1637 heads.pop(p, None)
1638 heads = [head for head, flag in heads.items() if flag]
1638 heads = [head for head, flag in heads.items() if flag]
1639 roots = list(roots)
1639 roots = list(roots)
1640 assert orderedout
1640 assert orderedout
1641 assert roots
1641 assert roots
1642 assert heads
1642 assert heads
1643 return (orderedout, roots, heads)
1643 return (orderedout, roots, heads)
1644
1644
def headrevs(self, revs=None):
    """Return the revision numbers that have no children.

    When ``revs`` is None the whole revlog is considered; otherwise the
    computation is restricted to the given revisions.
    """
    if revs is None:
        try:
            # fast path: the native (C) index implements headrevs directly
            return self.index.headrevs()
        except AttributeError:
            # pure-python index: fall back to the slow implementation
            return self._headrevs()
    use_rust = rustdagop is not None and self.index.rust_ext_compat
    if use_rust:
        return rustdagop.headrevs(self.index, revs)
    return dagop.headrevs(revs, self._uncheckedparentrevs)
1654
1654
def computephases(self, roots):
    """Compute phase information from ``roots``.

    Delegates to the native index's ``computephasesmapsets``.
    """
    index = self.index
    return index.computephasesmapsets(roots)
1657
1657
1658 def _headrevs(self):
1658 def _headrevs(self):
1659 count = len(self)
1659 count = len(self)
1660 if not count:
1660 if not count:
1661 return [nullrev]
1661 return [nullrev]
1662 # we won't iter over filtered rev so nobody is a head at start
1662 # we won't iter over filtered rev so nobody is a head at start
1663 ishead = [0] * (count + 1)
1663 ishead = [0] * (count + 1)
1664 index = self.index
1664 index = self.index
1665 for r in self:
1665 for r in self:
1666 ishead[r] = 1 # I may be an head
1666 ishead[r] = 1 # I may be an head
1667 e = index[r]
1667 e = index[r]
1668 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1668 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1669 return [r for r, val in enumerate(ishead) if val]
1669 return [r for r, val in enumerate(ishead) if val]
1670
1670
def heads(self, start=None, stop=None):
    """Return the list of all nodes that have no children.

    If ``start`` is specified, only heads that are descendants of
    ``start`` will be returned.
    If ``stop`` is specified, all the revisions in ``stop`` are treated
    as if they had no children.
    """
    if start is None and stop is None:
        # whole-repo request: use the (possibly native) headrevs()
        if not len(self):
            return [self.nullid]
        return [self.node(r) for r in self.headrevs()]

    startrev = nullrev if start is None else self.rev(start)
    stoprevs = {self.rev(n) for n in stop or []}

    revs = dagop.headrevssubset(
        self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
    )

    return [self.node(rev) for rev in revs]
1696
1696
def children(self, node):
    """Find the children of a given node."""
    kids = []
    parentrev = self.rev(node)
    # children can only appear after their parent in revision order
    for rev in self.revs(start=parentrev + 1):
        real_parents = [p for p in self.parentrevs(rev) if p != nullrev]
        if not real_parents:
            # parentless revision: it is a child of the null revision
            if parentrev == nullrev:
                kids.append(self.node(rev))
        else:
            # one entry per matching parent, mirroring parentrevs()
            kids.extend(
                self.node(rev) for p in real_parents if p == parentrev
            )
    return kids
1710
1710
def commonancestorsheads(self, a, b):
    """Calculate all the heads of the common ancestors of nodes a and b."""
    rev_a = self.rev(a)
    rev_b = self.rev(b)
    head_revs = self._commonancestorsheads(rev_a, rev_b)
    return pycompat.maplist(self.node, head_revs)
1716
1716
1717 def _commonancestorsheads(self, *revs):
1717 def _commonancestorsheads(self, *revs):
1718 """calculate all the heads of the common ancestors of revs"""
1718 """calculate all the heads of the common ancestors of revs"""
1719 try:
1719 try:
1720 ancs = self.index.commonancestorsheads(*revs)
1720 ancs = self.index.commonancestorsheads(*revs)
1721 except (AttributeError, OverflowError): # C implementation failed
1721 except (AttributeError, OverflowError): # C implementation failed
1722 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1722 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1723 return ancs
1723 return ancs
1724
1724
def isancestor(self, a, b):
    """Return True if node a is an ancestor of node b.

    A revision is considered an ancestor of itself.
    """
    rev_a = self.rev(a)
    rev_b = self.rev(b)
    return self.isancestorrev(rev_a, rev_b)
1731
1731
def isancestorrev(self, a, b):
    """Return True if revision a is an ancestor of revision b.

    A revision is considered an ancestor of itself.

    The implementation of this is trivial but the use of
    reachableroots is not.
    """
    # null is an ancestor of everything; every rev is its own ancestor
    if a == nullrev or a == b:
        return True
    # an ancestor always has a smaller revision number
    if a > b:
        return False
    # a < b: a is an ancestor iff a is among the reachable roots of b
    return bool(self.reachableroots(a, [b], [a], includepath=False))
1746
1746
def reachableroots(self, minroot, heads, roots, includepath=False):
    """return (heads(::(<roots> and <roots>::<heads>)))

    If includepath is True, return (<roots>::<heads>).
    """
    args = (minroot, heads, roots, includepath)
    try:
        # native (C/Rust) index implementation
        return self.index.reachableroots2(*args)
    except AttributeError:
        # pure-python index: note the different argument order of the
        # fallback (roots before heads)
        return dagop._reachablerootspure(
            self.parentrevs, minroot, roots, heads, includepath
        )
1759
1759
def ancestor(self, a, b):
    """Calculate the "best" common ancestor of nodes a and b."""
    rev_a = self.rev(a)
    rev_b = self.rev(b)
    try:
        ancs = self.index.ancestors(rev_a, rev_b)
    except (AttributeError, OverflowError):
        # pure-python index, or the C implementation overflowed
        ancs = ancestor.ancestors(self.parentrevs, rev_a, rev_b)
    if not ancs:
        return self.nullid
    # choose a consistent winner when there's a tie
    return min(map(self.node, ancs))
1772
1772
1773 def _match(self, id):
1773 def _match(self, id):
1774 if isinstance(id, int):
1774 if isinstance(id, int):
1775 # rev
1775 # rev
1776 return self.node(id)
1776 return self.node(id)
1777 if len(id) == self.nodeconstants.nodelen:
1777 if len(id) == self.nodeconstants.nodelen:
1778 # possibly a binary node
1778 # possibly a binary node
1779 # odds of a binary node being all hex in ASCII are 1 in 10**25
1779 # odds of a binary node being all hex in ASCII are 1 in 10**25
1780 try:
1780 try:
1781 node = id
1781 node = id
1782 self.rev(node) # quick search the index
1782 self.rev(node) # quick search the index
1783 return node
1783 return node
1784 except error.LookupError:
1784 except error.LookupError:
1785 pass # may be partial hex id
1785 pass # may be partial hex id
1786 try:
1786 try:
1787 # str(rev)
1787 # str(rev)
1788 rev = int(id)
1788 rev = int(id)
1789 if b"%d" % rev != id:
1789 if b"%d" % rev != id:
1790 raise ValueError
1790 raise ValueError
1791 if rev < 0:
1791 if rev < 0:
1792 rev = len(self) + rev
1792 rev = len(self) + rev
1793 if rev < 0 or rev >= len(self):
1793 if rev < 0 or rev >= len(self):
1794 raise ValueError
1794 raise ValueError
1795 return self.node(rev)
1795 return self.node(rev)
1796 except (ValueError, OverflowError):
1796 except (ValueError, OverflowError):
1797 pass
1797 pass
1798 if len(id) == 2 * self.nodeconstants.nodelen:
1798 if len(id) == 2 * self.nodeconstants.nodelen:
1799 try:
1799 try:
1800 # a full hex nodeid?
1800 # a full hex nodeid?
1801 node = bin(id)
1801 node = bin(id)
1802 self.rev(node)
1802 self.rev(node)
1803 return node
1803 return node
1804 except (binascii.Error, error.LookupError):
1804 except (binascii.Error, error.LookupError):
1805 pass
1805 pass
1806
1806
def _partialmatch(self, id):
    """Resolve a hex-prefix ``id`` to a binary node.

    Returns the matching node, None when nothing matches, raises
    AmbiguousPrefixLookupError on multiple matches and WdirUnsupported
    when the prefix identifies the working directory pseudo-node.
    """
    # we don't care wdirfilenodeids as they should be always full hash
    maybewdir = self.nodeconstants.wdirhex.startswith(id)
    ambiguous = False
    try:
        # fast path: radix-tree lookup in the native index
        partial = self.index.partialmatch(id)
        if partial and self.hasnode(partial):
            if maybewdir:
                # single 'ff...' match in radix tree, ambiguous with wdir
                ambiguous = True
            else:
                return partial
        elif maybewdir:
            # no 'ff...' match in radix tree, wdir identified
            raise error.WdirUnsupported
        else:
            return None
    except error.RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        # fast path: for unfiltered changelog, radix tree is accurate
        if not getattr(self, 'filteredrevs', None):
            ambiguous = True
        # fall through to slow path that filters hidden revisions
    except (AttributeError, ValueError):
        # we are pure python, or key is not hex
        pass
    if ambiguous:
        raise error.AmbiguousPrefixLookupError(
            id, self.display_id, _(b'ambiguous identifier')
        )

    # slow path below; consult the prefix cache first
    if id in self._pcache:
        return self._pcache[id]

    if len(id) <= 40:
        # hex(node)[:...]
        l = len(id) // 2 * 2  # grab an even number of digits
        try:
            # we're dropping the last digit, so let's check that it's hex,
            # to avoid the expensive computation below if it's not
            if len(id) % 2 > 0:
                if not (id[-1] in hexdigits):
                    return None
            prefix = bin(id[:l])
        except binascii.Error:
            pass
        else:
            # linear scan of the index for nodes with this prefix,
            # keeping only visible (non-filtered) ones
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [
                n for n in nl if hex(n).startswith(id) and self.hasnode(n)
            ]
            if self.nodeconstants.nullhex.startswith(id):
                nl.append(self.nullid)
            if len(nl) > 0:
                if len(nl) == 1 and not maybewdir:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            if maybewdir:
                raise error.WdirUnsupported
            return None
1870
1870
def lookup(self, id):
    """locate a node based on:
    - revision number or str(revision number)
    - nodeid or subset of hex nodeid
    """
    # exact forms first (rev number, binary node, full hex)
    node = self._match(id)
    if node is not None:
        return node
    # then hex-prefix resolution
    node = self._partialmatch(id)
    if node:
        return node

    raise error.LookupError(id, self.display_id, _(b'no match found'))
1884
1884
def shortest(self, node, minlength=1):
    """Find the shortest unambiguous prefix that matches node."""

    def isvalid(prefix):
        # True when ``prefix`` resolves to exactly one node
        try:
            matchednode = self._partialmatch(prefix)
        except error.AmbiguousPrefixLookupError:
            return False
        except error.WdirUnsupported:
            # single 'ff...' match
            return True
        if matchednode is None:
            raise error.LookupError(node, self.display_id, _(b'no node'))
        return True

    def maybewdir(prefix):
        # all-'f' prefixes could also denote the working directory node
        return all(c == b'f' for c in pycompat.iterbytestr(prefix))

    hexnode = hex(node)

    def disambiguate(hexnode, minlength):
        """Disambiguate against wdirid."""
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if not maybewdir(prefix):
                return prefix

    # fast path: native index computes the shortest prefix directly
    # (only valid when no revisions are filtered)
    if not getattr(self, 'filteredrevs', None):
        try:
            length = max(self.index.shortest(node), minlength)
            return disambiguate(hexnode, length)
        except error.RevlogError:
            if node != self.nodeconstants.wdirid:
                raise error.LookupError(
                    node, self.display_id, _(b'no node')
                )
        except AttributeError:
            # Fall through to pure code
            pass

    # the working directory pseudo-node never collides via isvalid()
    if node == self.nodeconstants.wdirid:
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return prefix

    # slow path: grow the prefix until it resolves unambiguously
    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        if isvalid(prefix):
            return disambiguate(hexnode, length)
1935
1935
def cmp(self, node, text):
    """compare text with a given file revision

    returns True if text is different than what is stored.
    """
    # compare hashes instead of content: cheaper than reconstructing
    # and diffing the stored revision
    parent1, parent2 = self.parents(node)
    expected = storageutil.hashrevisionsha1(text, parent1, parent2)
    return expected != node
1943
1943
1944 def _getsegmentforrevs(self, startrev, endrev):
1944 def _getsegmentforrevs(self, startrev, endrev):
1945 """Obtain a segment of raw data corresponding to a range of revisions.
1945 """Obtain a segment of raw data corresponding to a range of revisions.
1946
1946
1947 Accepts the start and end revisions and an optional already-open
1947 Accepts the start and end revisions and an optional already-open
1948 file handle to be used for reading. If the file handle is read, its
1948 file handle to be used for reading. If the file handle is read, its
1949 seek position will not be preserved.
1949 seek position will not be preserved.
1950
1950
1951 Requests for data may be satisfied by a cache.
1951 Requests for data may be satisfied by a cache.
1952
1952
1953 Returns a 2-tuple of (offset, data) for the requested range of
1953 Returns a 2-tuple of (offset, data) for the requested range of
1954 revisions. Offset is the integer offset from the beginning of the
1954 revisions. Offset is the integer offset from the beginning of the
1955 revlog and data is a str or buffer of the raw byte data.
1955 revlog and data is a str or buffer of the raw byte data.
1956
1956
1957 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1957 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1958 to determine where each revision's data begins and ends.
1958 to determine where each revision's data begins and ends.
1959 """
1959 """
1960 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1960 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1961 # (functions are expensive).
1961 # (functions are expensive).
1962 index = self.index
1962 index = self.index
1963 istart = index[startrev]
1963 istart = index[startrev]
1964 start = int(istart[0] >> 16)
1964 start = int(istart[0] >> 16)
1965 if startrev == endrev:
1965 if startrev == endrev:
1966 end = start + istart[1]
1966 end = start + istart[1]
1967 else:
1967 else:
1968 iend = index[endrev]
1968 iend = index[endrev]
1969 end = int(iend[0] >> 16) + iend[1]
1969 end = int(iend[0] >> 16) + iend[1]
1970
1970
1971 if self._inline:
1971 if self._inline:
1972 start += (startrev + 1) * self.index.entry_size
1972 start += (startrev + 1) * self.index.entry_size
1973 end += (endrev + 1) * self.index.entry_size
1973 end += (endrev + 1) * self.index.entry_size
1974 length = end - start
1974 length = end - start
1975
1975
1976 return start, self._segmentfile.read_chunk(start, length)
1976 return start, self._segmentfile.read_chunk(start, length)
1977
1977
def _chunk(self, rev):
    """Obtain a single decompressed chunk for a revision.

    Accepts an integer revision.

    Returns a str holding uncompressed data for the requested revision.
    """
    # the per-revision compression mode is stored in the index entry
    compression_mode = self.index[rev][10]
    data = self._getsegmentforrevs(rev, rev)[1]
    if compression_mode == COMP_MODE_PLAIN:
        # stored uncompressed
        return data
    if compression_mode == COMP_MODE_DEFAULT:
        # revlog-level default compressor
        return self._decompressor(data)
    if compression_mode == COMP_MODE_INLINE:
        # compression engine is encoded in the chunk header
        return self.decompress(data)
    msg = b'unknown compression mode %d'
    msg %= compression_mode
    raise error.RevlogError(msg)
1999
1999
def _chunks(self, revs, targetsize=None):
    """Obtain decompressed chunks for the specified revisions.

    Accepts an iterable of numeric revisions that are assumed to be in
    ascending order.

    This function is similar to calling ``self._chunk()`` multiple times,
    but is faster.

    Returns a list with decompressed data for each requested revision.
    """
    if not revs:
        return []
    # bind hot attributes to locals: this loop is performance critical
    start = self.start
    length = self.length
    inline = self._inline
    iosize = self.index.entry_size
    buffer = util.buffer

    l = []
    ladd = l.append

    if not self.data_config.with_sparse_read:
        # no sparse read: fetch everything as one contiguous segment
        slicedchunks = (revs,)
    else:
        # split the revisions into dense slices to bound read density
        slicedchunks = deltautil.slicechunk(
            self, revs, targetsize=targetsize
        )

    for revschunk in slicedchunks:
        firstrev = revschunk[0]
        # Skip trailing revisions with empty diff
        for lastrev in revschunk[::-1]:
            if length(lastrev) != 0:
                break

        try:
            offset, data = self._getsegmentforrevs(firstrev, lastrev)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev) for rev in revschunk]

        decomp = self.decompress
        # self._decompressor might be None, but will not be used in that case
        def_decomp = self._decompressor
        for rev in revschunk:
            chunkstart = start(rev)
            if inline:
                # inline revlogs interleave index entries with data
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            comp_mode = self.index[rev][10]
            # zero-copy view into the fetched segment
            c = buffer(data, chunkstart - offset, chunklength)
            if comp_mode == COMP_MODE_PLAIN:
                ladd(c)
            elif comp_mode == COMP_MODE_INLINE:
                ladd(decomp(c))
            elif comp_mode == COMP_MODE_DEFAULT:
                ladd(def_decomp(c))
            else:
                msg = b'unknown compression mode %d'
                msg %= comp_mode
                raise error.RevlogError(msg)

    return l
2067
2067
2068 def deltaparent(self, rev):
2068 def deltaparent(self, rev):
2069 """return deltaparent of the given revision"""
2069 """return deltaparent of the given revision"""
2070 base = self.index[rev][3]
2070 base = self.index[rev][3]
2071 if base == rev:
2071 if base == rev:
2072 return nullrev
2072 return nullrev
2073 elif self.delta_config.general_delta:
2073 elif self.delta_config.general_delta:
2074 return base
2074 return base
2075 else:
2075 else:
2076 return rev - 1
2076 return rev - 1
2077
2077
2078 def issnapshot(self, rev):
2078 def issnapshot(self, rev):
2079 """tells whether rev is a snapshot"""
2079 """tells whether rev is a snapshot"""
2080 if not self._sparserevlog:
2080 if not self._sparserevlog:
2081 return self.deltaparent(rev) == nullrev
2081 return self.deltaparent(rev) == nullrev
2082 elif hasattr(self.index, 'issnapshot'):
2082 elif hasattr(self.index, 'issnapshot'):
2083 # directly assign the method to cache the testing and access
2083 # directly assign the method to cache the testing and access
2084 self.issnapshot = self.index.issnapshot
2084 self.issnapshot = self.index.issnapshot
2085 return self.issnapshot(rev)
2085 return self.issnapshot(rev)
2086 if rev == nullrev:
2086 if rev == nullrev:
2087 return True
2087 return True
2088 entry = self.index[rev]
2088 entry = self.index[rev]
2089 base = entry[3]
2089 base = entry[3]
2090 if base == rev:
2090 if base == rev:
2091 return True
2091 return True
2092 if base == nullrev:
2092 if base == nullrev:
2093 return True
2093 return True
2094 p1 = entry[5]
2094 p1 = entry[5]
2095 while self.length(p1) == 0:
2095 while self.length(p1) == 0:
2096 b = self.deltaparent(p1)
2096 b = self.deltaparent(p1)
2097 if b == p1:
2097 if b == p1:
2098 break
2098 break
2099 p1 = b
2099 p1 = b
2100 p2 = entry[6]
2100 p2 = entry[6]
2101 while self.length(p2) == 0:
2101 while self.length(p2) == 0:
2102 b = self.deltaparent(p2)
2102 b = self.deltaparent(p2)
2103 if b == p2:
2103 if b == p2:
2104 break
2104 break
2105 p2 = b
2105 p2 = b
2106 if base == p1 or base == p2:
2106 if base == p1 or base == p2:
2107 return False
2107 return False
2108 return self.issnapshot(base)
2108 return self.issnapshot(base)
2109
2109
2110 def snapshotdepth(self, rev):
2110 def snapshotdepth(self, rev):
2111 """number of snapshot in the chain before this one"""
2111 """number of snapshot in the chain before this one"""
2112 if not self.issnapshot(rev):
2112 if not self.issnapshot(rev):
2113 raise error.ProgrammingError(b'revision %d not a snapshot')
2113 raise error.ProgrammingError(b'revision %d not a snapshot')
2114 return len(self._deltachain(rev)[0]) - 1
2114 return len(self._deltachain(rev)[0]) - 1
2115
2115
2116 def revdiff(self, rev1, rev2):
2116 def revdiff(self, rev1, rev2):
2117 """return or calculate a delta between two revisions
2117 """return or calculate a delta between two revisions
2118
2118
2119 The delta calculated is in binary form and is intended to be written to
2119 The delta calculated is in binary form and is intended to be written to
2120 revlog data directly. So this function needs raw revision data.
2120 revlog data directly. So this function needs raw revision data.
2121 """
2121 """
2122 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
2122 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
2123 return bytes(self._chunk(rev2))
2123 return bytes(self._chunk(rev2))
2124
2124
2125 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
2125 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
2126
2126
2127 def revision(self, nodeorrev):
2127 def revision(self, nodeorrev):
2128 """return an uncompressed revision of a given node or revision
2128 """return an uncompressed revision of a given node or revision
2129 number.
2129 number.
2130 """
2130 """
2131 return self._revisiondata(nodeorrev)
2131 return self._revisiondata(nodeorrev)
2132
2132
2133 def sidedata(self, nodeorrev):
2133 def sidedata(self, nodeorrev):
2134 """a map of extra data related to the changeset but not part of the hash
2134 """a map of extra data related to the changeset but not part of the hash
2135
2135
2136 This function currently return a dictionary. However, more advanced
2136 This function currently return a dictionary. However, more advanced
2137 mapping object will likely be used in the future for a more
2137 mapping object will likely be used in the future for a more
2138 efficient/lazy code.
2138 efficient/lazy code.
2139 """
2139 """
2140 # deal with <nodeorrev> argument type
2140 # deal with <nodeorrev> argument type
2141 if isinstance(nodeorrev, int):
2141 if isinstance(nodeorrev, int):
2142 rev = nodeorrev
2142 rev = nodeorrev
2143 else:
2143 else:
2144 rev = self.rev(nodeorrev)
2144 rev = self.rev(nodeorrev)
2145 return self._sidedata(rev)
2145 return self._sidedata(rev)
2146
2146
2147 def _revisiondata(self, nodeorrev, raw=False):
2147 def _revisiondata(self, nodeorrev, raw=False):
2148 # deal with <nodeorrev> argument type
2148 # deal with <nodeorrev> argument type
2149 if isinstance(nodeorrev, int):
2149 if isinstance(nodeorrev, int):
2150 rev = nodeorrev
2150 rev = nodeorrev
2151 node = self.node(rev)
2151 node = self.node(rev)
2152 else:
2152 else:
2153 node = nodeorrev
2153 node = nodeorrev
2154 rev = None
2154 rev = None
2155
2155
2156 # fast path the special `nullid` rev
2156 # fast path the special `nullid` rev
2157 if node == self.nullid:
2157 if node == self.nullid:
2158 return b""
2158 return b""
2159
2159
2160 # ``rawtext`` is the text as stored inside the revlog. Might be the
2160 # ``rawtext`` is the text as stored inside the revlog. Might be the
2161 # revision or might need to be processed to retrieve the revision.
2161 # revision or might need to be processed to retrieve the revision.
2162 rev, rawtext, validated = self._rawtext(node, rev)
2162 rev, rawtext, validated = self._rawtext(node, rev)
2163
2163
2164 if raw and validated:
2164 if raw and validated:
2165 # if we don't want to process the raw text and that raw
2165 # if we don't want to process the raw text and that raw
2166 # text is cached, we can exit early.
2166 # text is cached, we can exit early.
2167 return rawtext
2167 return rawtext
2168 if rev is None:
2168 if rev is None:
2169 rev = self.rev(node)
2169 rev = self.rev(node)
2170 # the revlog's flag for this revision
2170 # the revlog's flag for this revision
2171 # (usually alter its state or content)
2171 # (usually alter its state or content)
2172 flags = self.flags(rev)
2172 flags = self.flags(rev)
2173
2173
2174 if validated and flags == REVIDX_DEFAULT_FLAGS:
2174 if validated and flags == REVIDX_DEFAULT_FLAGS:
2175 # no extra flags set, no flag processor runs, text = rawtext
2175 # no extra flags set, no flag processor runs, text = rawtext
2176 return rawtext
2176 return rawtext
2177
2177
2178 if raw:
2178 if raw:
2179 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2179 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2180 text = rawtext
2180 text = rawtext
2181 else:
2181 else:
2182 r = flagutil.processflagsread(self, rawtext, flags)
2182 r = flagutil.processflagsread(self, rawtext, flags)
2183 text, validatehash = r
2183 text, validatehash = r
2184 if validatehash:
2184 if validatehash:
2185 self.checkhash(text, node, rev=rev)
2185 self.checkhash(text, node, rev=rev)
2186 if not validated:
2186 if not validated:
2187 self._revisioncache = (node, rev, rawtext)
2187 self._revisioncache = (node, rev, rawtext)
2188
2188
2189 return text
2189 return text
2190
2190
2191 def _rawtext(self, node, rev):
2191 def _rawtext(self, node, rev):
2192 """return the possibly unvalidated rawtext for a revision
2192 """return the possibly unvalidated rawtext for a revision
2193
2193
2194 returns (rev, rawtext, validated)
2194 returns (rev, rawtext, validated)
2195 """
2195 """
2196
2196
2197 # revision in the cache (could be useful to apply delta)
2197 # revision in the cache (could be useful to apply delta)
2198 cachedrev = None
2198 cachedrev = None
2199 # An intermediate text to apply deltas to
2199 # An intermediate text to apply deltas to
2200 basetext = None
2200 basetext = None
2201
2201
2202 # Check if we have the entry in cache
2202 # Check if we have the entry in cache
2203 # The cache entry looks like (node, rev, rawtext)
2203 # The cache entry looks like (node, rev, rawtext)
2204 if self._revisioncache:
2204 if self._revisioncache:
2205 if self._revisioncache[0] == node:
2205 if self._revisioncache[0] == node:
2206 return (rev, self._revisioncache[2], True)
2206 return (rev, self._revisioncache[2], True)
2207 cachedrev = self._revisioncache[1]
2207 cachedrev = self._revisioncache[1]
2208
2208
2209 if rev is None:
2209 if rev is None:
2210 rev = self.rev(node)
2210 rev = self.rev(node)
2211
2211
2212 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2212 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2213 if stopped:
2213 if stopped:
2214 basetext = self._revisioncache[2]
2214 basetext = self._revisioncache[2]
2215
2215
2216 # drop cache to save memory, the caller is expected to
2216 # drop cache to save memory, the caller is expected to
2217 # update self._revisioncache after validating the text
2217 # update self._revisioncache after validating the text
2218 self._revisioncache = None
2218 self._revisioncache = None
2219
2219
2220 targetsize = None
2220 targetsize = None
2221 rawsize = self.index[rev][2]
2221 rawsize = self.index[rev][2]
2222 if 0 <= rawsize:
2222 if 0 <= rawsize:
2223 targetsize = 4 * rawsize
2223 targetsize = 4 * rawsize
2224
2224
2225 bins = self._chunks(chain, targetsize=targetsize)
2225 bins = self._chunks(chain, targetsize=targetsize)
2226 if basetext is None:
2226 if basetext is None:
2227 basetext = bytes(bins[0])
2227 basetext = bytes(bins[0])
2228 bins = bins[1:]
2228 bins = bins[1:]
2229
2229
2230 rawtext = mdiff.patches(basetext, bins)
2230 rawtext = mdiff.patches(basetext, bins)
2231 del basetext # let us have a chance to free memory early
2231 del basetext # let us have a chance to free memory early
2232 return (rev, rawtext, False)
2232 return (rev, rawtext, False)
2233
2233
2234 def _sidedata(self, rev):
2234 def _sidedata(self, rev):
2235 """Return the sidedata for a given revision number."""
2235 """Return the sidedata for a given revision number."""
2236 index_entry = self.index[rev]
2236 index_entry = self.index[rev]
2237 sidedata_offset = index_entry[8]
2237 sidedata_offset = index_entry[8]
2238 sidedata_size = index_entry[9]
2238 sidedata_size = index_entry[9]
2239
2239
2240 if self._inline:
2240 if self._inline:
2241 sidedata_offset += self.index.entry_size * (1 + rev)
2241 sidedata_offset += self.index.entry_size * (1 + rev)
2242 if sidedata_size == 0:
2242 if sidedata_size == 0:
2243 return {}
2243 return {}
2244
2244
2245 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
2245 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
2246 filename = self._sidedatafile
2246 filename = self._sidedatafile
2247 end = self._docket.sidedata_end
2247 end = self._docket.sidedata_end
2248 offset = sidedata_offset
2248 offset = sidedata_offset
2249 length = sidedata_size
2249 length = sidedata_size
2250 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
2250 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
2251 raise error.RevlogError(m)
2251 raise error.RevlogError(m)
2252
2252
2253 comp_segment = self._segmentfile_sidedata.read_chunk(
2253 comp_segment = self._segmentfile_sidedata.read_chunk(
2254 sidedata_offset, sidedata_size
2254 sidedata_offset, sidedata_size
2255 )
2255 )
2256
2256
2257 comp = self.index[rev][11]
2257 comp = self.index[rev][11]
2258 if comp == COMP_MODE_PLAIN:
2258 if comp == COMP_MODE_PLAIN:
2259 segment = comp_segment
2259 segment = comp_segment
2260 elif comp == COMP_MODE_DEFAULT:
2260 elif comp == COMP_MODE_DEFAULT:
2261 segment = self._decompressor(comp_segment)
2261 segment = self._decompressor(comp_segment)
2262 elif comp == COMP_MODE_INLINE:
2262 elif comp == COMP_MODE_INLINE:
2263 segment = self.decompress(comp_segment)
2263 segment = self.decompress(comp_segment)
2264 else:
2264 else:
2265 msg = b'unknown compression mode %d'
2265 msg = b'unknown compression mode %d'
2266 msg %= comp
2266 msg %= comp
2267 raise error.RevlogError(msg)
2267 raise error.RevlogError(msg)
2268
2268
2269 sidedata = sidedatautil.deserialize_sidedata(segment)
2269 sidedata = sidedatautil.deserialize_sidedata(segment)
2270 return sidedata
2270 return sidedata
2271
2271
2272 def rawdata(self, nodeorrev):
2272 def rawdata(self, nodeorrev):
2273 """return an uncompressed raw data of a given node or revision number."""
2273 """return an uncompressed raw data of a given node or revision number."""
2274 return self._revisiondata(nodeorrev, raw=True)
2274 return self._revisiondata(nodeorrev, raw=True)
2275
2275
2276 def hash(self, text, p1, p2):
2276 def hash(self, text, p1, p2):
2277 """Compute a node hash.
2277 """Compute a node hash.
2278
2278
2279 Available as a function so that subclasses can replace the hash
2279 Available as a function so that subclasses can replace the hash
2280 as needed.
2280 as needed.
2281 """
2281 """
2282 return storageutil.hashrevisionsha1(text, p1, p2)
2282 return storageutil.hashrevisionsha1(text, p1, p2)
2283
2283
2284 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2284 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2285 """Check node hash integrity.
2285 """Check node hash integrity.
2286
2286
2287 Available as a function so that subclasses can extend hash mismatch
2287 Available as a function so that subclasses can extend hash mismatch
2288 behaviors as needed.
2288 behaviors as needed.
2289 """
2289 """
2290 try:
2290 try:
2291 if p1 is None and p2 is None:
2291 if p1 is None and p2 is None:
2292 p1, p2 = self.parents(node)
2292 p1, p2 = self.parents(node)
2293 if node != self.hash(text, p1, p2):
2293 if node != self.hash(text, p1, p2):
2294 # Clear the revision cache on hash failure. The revision cache
2294 # Clear the revision cache on hash failure. The revision cache
2295 # only stores the raw revision and clearing the cache does have
2295 # only stores the raw revision and clearing the cache does have
2296 # the side-effect that we won't have a cache hit when the raw
2296 # the side-effect that we won't have a cache hit when the raw
2297 # revision data is accessed. But this case should be rare and
2297 # revision data is accessed. But this case should be rare and
2298 # it is extra work to teach the cache about the hash
2298 # it is extra work to teach the cache about the hash
2299 # verification state.
2299 # verification state.
2300 if self._revisioncache and self._revisioncache[0] == node:
2300 if self._revisioncache and self._revisioncache[0] == node:
2301 self._revisioncache = None
2301 self._revisioncache = None
2302
2302
2303 revornode = rev
2303 revornode = rev
2304 if revornode is None:
2304 if revornode is None:
2305 revornode = templatefilters.short(hex(node))
2305 revornode = templatefilters.short(hex(node))
2306 raise error.RevlogError(
2306 raise error.RevlogError(
2307 _(b"integrity check failed on %s:%s")
2307 _(b"integrity check failed on %s:%s")
2308 % (self.display_id, pycompat.bytestr(revornode))
2308 % (self.display_id, pycompat.bytestr(revornode))
2309 )
2309 )
2310 except error.RevlogError:
2310 except error.RevlogError:
2311 if self.feature_config.censorable and storageutil.iscensoredtext(
2311 if self.feature_config.censorable and storageutil.iscensoredtext(
2312 text
2312 text
2313 ):
2313 ):
2314 raise error.CensoredNodeError(self.display_id, node, text)
2314 raise error.CensoredNodeError(self.display_id, node, text)
2315 raise
2315 raise
2316
2316
2317 @property
2317 @property
2318 def _split_index_file(self):
2318 def _split_index_file(self):
2319 """the path where to expect the index of an ongoing splitting operation
2319 """the path where to expect the index of an ongoing splitting operation
2320
2320
2321 The file will only exist if a splitting operation is in progress, but
2321 The file will only exist if a splitting operation is in progress, but
2322 it is always expected at the same location."""
2322 it is always expected at the same location."""
2323 parts = self.radix.split(b'/')
2323 parts = self.radix.split(b'/')
2324 if len(parts) > 1:
2324 if len(parts) > 1:
2325 # adds a '-s' prefix to the ``data/` or `meta/` base
2325 # adds a '-s' prefix to the ``data/` or `meta/` base
2326 head = parts[0] + b'-s'
2326 head = parts[0] + b'-s'
2327 mids = parts[1:-1]
2327 mids = parts[1:-1]
2328 tail = parts[-1] + b'.i'
2328 tail = parts[-1] + b'.i'
2329 pieces = [head] + mids + [tail]
2329 pieces = [head] + mids + [tail]
2330 return b'/'.join(pieces)
2330 return b'/'.join(pieces)
2331 else:
2331 else:
2332 # the revlog is stored at the root of the store (changelog or
2332 # the revlog is stored at the root of the store (changelog or
2333 # manifest), no risk of collision.
2333 # manifest), no risk of collision.
2334 return self.radix + b'.i.s'
2334 return self.radix + b'.i.s'
2335
2335
2336 def _enforceinlinesize(self, tr, side_write=True):
2336 def _enforceinlinesize(self, tr, side_write=True):
2337 """Check if the revlog is too big for inline and convert if so.
2337 """Check if the revlog is too big for inline and convert if so.
2338
2338
2339 This should be called after revisions are added to the revlog. If the
2339 This should be called after revisions are added to the revlog. If the
2340 revlog has grown too large to be an inline revlog, it will convert it
2340 revlog has grown too large to be an inline revlog, it will convert it
2341 to use multiple index and data files.
2341 to use multiple index and data files.
2342 """
2342 """
2343 tiprev = len(self) - 1
2343 tiprev = len(self) - 1
2344 total_size = self.start(tiprev) + self.length(tiprev)
2344 total_size = self.start(tiprev) + self.length(tiprev)
2345 if not self._inline or total_size < _maxinline:
2345 if not self._inline or total_size < _maxinline:
2346 return
2346 return
2347
2347
2348 troffset = tr.findoffset(self._indexfile)
2348 troffset = tr.findoffset(self._indexfile)
2349 if troffset is None:
2349 if troffset is None:
2350 raise error.RevlogError(
2350 raise error.RevlogError(
2351 _(b"%s not found in the transaction") % self._indexfile
2351 _(b"%s not found in the transaction") % self._indexfile
2352 )
2352 )
2353 if troffset:
2353 if troffset:
2354 tr.addbackup(self._indexfile, for_offset=True)
2354 tr.addbackup(self._indexfile, for_offset=True)
2355 tr.add(self._datafile, 0)
2355 tr.add(self._datafile, 0)
2356
2356
2357 existing_handles = False
2357 existing_handles = False
2358 if self._writinghandles is not None:
2358 if self._writinghandles is not None:
2359 existing_handles = True
2359 existing_handles = True
2360 fp = self._writinghandles[0]
2360 fp = self._writinghandles[0]
2361 fp.flush()
2361 fp.flush()
2362 fp.close()
2362 fp.close()
2363 # We can't use the cached file handle after close(). So prevent
2363 # We can't use the cached file handle after close(). So prevent
2364 # its usage.
2364 # its usage.
2365 self._writinghandles = None
2365 self._writinghandles = None
2366 self._segmentfile.writing_handle = None
2366 self._segmentfile.writing_handle = None
2367 # No need to deal with sidedata writing handle as it is only
2367 # No need to deal with sidedata writing handle as it is only
2368 # relevant with revlog-v2 which is never inline, not reaching
2368 # relevant with revlog-v2 which is never inline, not reaching
2369 # this code
2369 # this code
2370 if side_write:
2370 if side_write:
2371 old_index_file_path = self._indexfile
2371 old_index_file_path = self._indexfile
2372 new_index_file_path = self._split_index_file
2372 new_index_file_path = self._split_index_file
2373 opener = self.opener
2373 opener = self.opener
2374 weak_self = weakref.ref(self)
2374 weak_self = weakref.ref(self)
2375
2375
2376 # the "split" index replace the real index when the transaction is finalized
2376 # the "split" index replace the real index when the transaction is finalized
2377 def finalize_callback(tr):
2377 def finalize_callback(tr):
2378 opener.rename(
2378 opener.rename(
2379 new_index_file_path,
2379 new_index_file_path,
2380 old_index_file_path,
2380 old_index_file_path,
2381 checkambig=True,
2381 checkambig=True,
2382 )
2382 )
2383 maybe_self = weak_self()
2383 maybe_self = weak_self()
2384 if maybe_self is not None:
2384 if maybe_self is not None:
2385 maybe_self._indexfile = old_index_file_path
2385 maybe_self._indexfile = old_index_file_path
2386
2386
2387 def abort_callback(tr):
2387 def abort_callback(tr):
2388 maybe_self = weak_self()
2388 maybe_self = weak_self()
2389 if maybe_self is not None:
2389 if maybe_self is not None:
2390 maybe_self._indexfile = old_index_file_path
2390 maybe_self._indexfile = old_index_file_path
2391
2391
2392 tr.registertmp(new_index_file_path)
2392 tr.registertmp(new_index_file_path)
2393 if self.target[1] is not None:
2393 if self.target[1] is not None:
2394 callback_id = b'000-revlog-split-%d-%s' % self.target
2394 callback_id = b'000-revlog-split-%d-%s' % self.target
2395 else:
2395 else:
2396 callback_id = b'000-revlog-split-%d' % self.target[0]
2396 callback_id = b'000-revlog-split-%d' % self.target[0]
2397 tr.addfinalize(callback_id, finalize_callback)
2397 tr.addfinalize(callback_id, finalize_callback)
2398 tr.addabort(callback_id, abort_callback)
2398 tr.addabort(callback_id, abort_callback)
2399
2399
2400 new_dfh = self._datafp(b'w+')
2400 new_dfh = self._datafp(b'w+')
2401 new_dfh.truncate(0) # drop any potentially existing data
2401 new_dfh.truncate(0) # drop any potentially existing data
2402 try:
2402 try:
2403 with self.reading():
2403 with self.reading():
2404 for r in self:
2404 for r in self:
2405 new_dfh.write(self._getsegmentforrevs(r, r)[1])
2405 new_dfh.write(self._getsegmentforrevs(r, r)[1])
2406 new_dfh.flush()
2406 new_dfh.flush()
2407
2407
2408 if side_write:
2408 if side_write:
2409 self._indexfile = new_index_file_path
2409 self._indexfile = new_index_file_path
2410 with self.__index_new_fp() as fp:
2410 with self.__index_new_fp() as fp:
2411 self._format_flags &= ~FLAG_INLINE_DATA
2411 self._format_flags &= ~FLAG_INLINE_DATA
2412 self._inline = False
2412 self._inline = False
2413 for i in self:
2413 for i in self:
2414 e = self.index.entry_binary(i)
2414 e = self.index.entry_binary(i)
2415 if i == 0 and self._docket is None:
2415 if i == 0 and self._docket is None:
2416 header = self._format_flags | self._format_version
2416 header = self._format_flags | self._format_version
2417 header = self.index.pack_header(header)
2417 header = self.index.pack_header(header)
2418 e = header + e
2418 e = header + e
2419 fp.write(e)
2419 fp.write(e)
2420 if self._docket is not None:
2420 if self._docket is not None:
2421 self._docket.index_end = fp.tell()
2421 self._docket.index_end = fp.tell()
2422
2422
2423 # If we don't use side-write, the temp file replace the real
2423 # If we don't use side-write, the temp file replace the real
2424 # index when we exit the context manager
2424 # index when we exit the context manager
2425
2425
2426 nodemaputil.setup_persistent_nodemap(tr, self)
2426 nodemaputil.setup_persistent_nodemap(tr, self)
2427 self._segmentfile = randomaccessfile.randomaccessfile(
2427 self._segmentfile = randomaccessfile.randomaccessfile(
2428 self.opener,
2428 self.opener,
2429 self._datafile,
2429 self._datafile,
2430 self.data_config.chunk_cache_size,
2430 self.data_config.chunk_cache_size,
2431 )
2431 )
2432
2432
2433 if existing_handles:
2433 if existing_handles:
2434 # switched from inline to conventional reopen the index
2434 # switched from inline to conventional reopen the index
2435 ifh = self.__index_write_fp()
2435 ifh = self.__index_write_fp()
2436 self._writinghandles = (ifh, new_dfh, None)
2436 self._writinghandles = (ifh, new_dfh, None)
2437 self._segmentfile.writing_handle = new_dfh
2437 self._segmentfile.writing_handle = new_dfh
2438 new_dfh = None
2438 new_dfh = None
2439 # No need to deal with sidedata writing handle as it is only
2439 # No need to deal with sidedata writing handle as it is only
2440 # relevant with revlog-v2 which is never inline, not reaching
2440 # relevant with revlog-v2 which is never inline, not reaching
2441 # this code
2441 # this code
2442 finally:
2442 finally:
2443 if new_dfh is not None:
2443 if new_dfh is not None:
2444 new_dfh.close()
2444 new_dfh.close()
2445
2445
2446 def _nodeduplicatecallback(self, transaction, node):
2446 def _nodeduplicatecallback(self, transaction, node):
2447 """called when trying to add a node already stored."""
2447 """called when trying to add a node already stored."""
2448
2448
2449 @contextlib.contextmanager
2449 @contextlib.contextmanager
2450 def reading(self):
2450 def reading(self):
2451 """Context manager that keeps data and sidedata files open for reading"""
2451 """Context manager that keeps data and sidedata files open for reading"""
2452 if len(self.index) == 0:
2452 if len(self.index) == 0:
2453 yield # nothing to be read
2453 yield # nothing to be read
2454 else:
2454 else:
2455 with self._segmentfile.reading():
2455 with self._segmentfile.reading():
2456 with self._segmentfile_sidedata.reading():
2456 with self._segmentfile_sidedata.reading():
2457 yield
2457 yield
2458
2458
2459 @contextlib.contextmanager
2459 @contextlib.contextmanager
2460 def _writing(self, transaction):
2460 def _writing(self, transaction):
2461 if self._trypending:
2461 if self._trypending:
2462 msg = b'try to write in a `trypending` revlog: %s'
2462 msg = b'try to write in a `trypending` revlog: %s'
2463 msg %= self.display_id
2463 msg %= self.display_id
2464 raise error.ProgrammingError(msg)
2464 raise error.ProgrammingError(msg)
2465 if self._writinghandles is not None:
2465 if self._writinghandles is not None:
2466 yield
2466 yield
2467 else:
2467 else:
2468 ifh = dfh = sdfh = None
2468 ifh = dfh = sdfh = None
2469 try:
2469 try:
2470 r = len(self)
2470 r = len(self)
2471 # opening the data file.
2471 # opening the data file.
2472 dsize = 0
2472 dsize = 0
2473 if r:
2473 if r:
2474 dsize = self.end(r - 1)
2474 dsize = self.end(r - 1)
2475 dfh = None
2475 dfh = None
2476 if not self._inline:
2476 if not self._inline:
2477 try:
2477 try:
2478 dfh = self._datafp(b"r+")
2478 dfh = self._datafp(b"r+")
2479 if self._docket is None:
2479 if self._docket is None:
2480 dfh.seek(0, os.SEEK_END)
2480 dfh.seek(0, os.SEEK_END)
2481 else:
2481 else:
2482 dfh.seek(self._docket.data_end, os.SEEK_SET)
2482 dfh.seek(self._docket.data_end, os.SEEK_SET)
2483 except FileNotFoundError:
2483 except FileNotFoundError:
2484 dfh = self._datafp(b"w+")
2484 dfh = self._datafp(b"w+")
2485 transaction.add(self._datafile, dsize)
2485 transaction.add(self._datafile, dsize)
2486 if self._sidedatafile is not None:
2486 if self._sidedatafile is not None:
2487 # revlog-v2 does not inline, help Pytype
2487 # revlog-v2 does not inline, help Pytype
2488 assert dfh is not None
2488 assert dfh is not None
2489 try:
2489 try:
2490 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2490 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2491 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2491 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2492 except FileNotFoundError:
2492 except FileNotFoundError:
2493 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2493 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2494 transaction.add(
2494 transaction.add(
2495 self._sidedatafile, self._docket.sidedata_end
2495 self._sidedatafile, self._docket.sidedata_end
2496 )
2496 )
2497
2497
2498 # opening the index file.
2498 # opening the index file.
2499 isize = r * self.index.entry_size
2499 isize = r * self.index.entry_size
2500 ifh = self.__index_write_fp()
2500 ifh = self.__index_write_fp()
2501 if self._inline:
2501 if self._inline:
2502 transaction.add(self._indexfile, dsize + isize)
2502 transaction.add(self._indexfile, dsize + isize)
2503 else:
2503 else:
2504 transaction.add(self._indexfile, isize)
2504 transaction.add(self._indexfile, isize)
2505 # exposing all file handle for writing.
2505 # exposing all file handle for writing.
2506 self._writinghandles = (ifh, dfh, sdfh)
2506 self._writinghandles = (ifh, dfh, sdfh)
2507 self._segmentfile.writing_handle = ifh if self._inline else dfh
2507 self._segmentfile.writing_handle = ifh if self._inline else dfh
2508 self._segmentfile_sidedata.writing_handle = sdfh
2508 self._segmentfile_sidedata.writing_handle = sdfh
2509 yield
2509 yield
2510 if self._docket is not None:
2510 if self._docket is not None:
2511 self._write_docket(transaction)
2511 self._write_docket(transaction)
2512 finally:
2512 finally:
2513 self._writinghandles = None
2513 self._writinghandles = None
2514 self._segmentfile.writing_handle = None
2514 self._segmentfile.writing_handle = None
2515 self._segmentfile_sidedata.writing_handle = None
2515 self._segmentfile_sidedata.writing_handle = None
2516 if dfh is not None:
2516 if dfh is not None:
2517 dfh.close()
2517 dfh.close()
2518 if sdfh is not None:
2518 if sdfh is not None:
2519 sdfh.close()
2519 sdfh.close()
2520 # closing the index file last to avoid exposing referent to
2520 # closing the index file last to avoid exposing referent to
2521 # potential unflushed data content.
2521 # potential unflushed data content.
2522 if ifh is not None:
2522 if ifh is not None:
2523 ifh.close()
2523 ifh.close()
2524
2524
2525 def _write_docket(self, transaction):
2525 def _write_docket(self, transaction):
2526 """write the current docket on disk
2526 """write the current docket on disk
2527
2527
2528 Exist as a method to help changelog to implement transaction logic
2528 Exist as a method to help changelog to implement transaction logic
2529
2529
2530 We could also imagine using the same transaction logic for all revlog
2530 We could also imagine using the same transaction logic for all revlog
2531 since docket are cheap."""
2531 since docket are cheap."""
2532 self._docket.write(transaction)
2532 self._docket.write(transaction)
2533
2533
2534 def addrevision(
2534 def addrevision(
2535 self,
2535 self,
2536 text,
2536 text,
2537 transaction,
2537 transaction,
2538 link,
2538 link,
2539 p1,
2539 p1,
2540 p2,
2540 p2,
2541 cachedelta=None,
2541 cachedelta=None,
2542 node=None,
2542 node=None,
2543 flags=REVIDX_DEFAULT_FLAGS,
2543 flags=REVIDX_DEFAULT_FLAGS,
2544 deltacomputer=None,
2544 deltacomputer=None,
2545 sidedata=None,
2545 sidedata=None,
2546 ):
2546 ):
2547 """add a revision to the log
2547 """add a revision to the log
2548
2548
2549 text - the revision data to add
2549 text - the revision data to add
2550 transaction - the transaction object used for rollback
2550 transaction - the transaction object used for rollback
2551 link - the linkrev data to add
2551 link - the linkrev data to add
2552 p1, p2 - the parent nodeids of the revision
2552 p1, p2 - the parent nodeids of the revision
2553 cachedelta - an optional precomputed delta
2553 cachedelta - an optional precomputed delta
2554 node - nodeid of revision; typically node is not specified, and it is
2554 node - nodeid of revision; typically node is not specified, and it is
2555 computed by default as hash(text, p1, p2), however subclasses might
2555 computed by default as hash(text, p1, p2), however subclasses might
2556 use different hashing method (and override checkhash() in such case)
2556 use different hashing method (and override checkhash() in such case)
2557 flags - the known flags to set on the revision
2557 flags - the known flags to set on the revision
2558 deltacomputer - an optional deltacomputer instance shared between
2558 deltacomputer - an optional deltacomputer instance shared between
2559 multiple calls
2559 multiple calls
2560 """
2560 """
2561 if link == nullrev:
2561 if link == nullrev:
2562 raise error.RevlogError(
2562 raise error.RevlogError(
2563 _(b"attempted to add linkrev -1 to %s") % self.display_id
2563 _(b"attempted to add linkrev -1 to %s") % self.display_id
2564 )
2564 )
2565
2565
2566 if sidedata is None:
2566 if sidedata is None:
2567 sidedata = {}
2567 sidedata = {}
2568 elif sidedata and not self.hassidedata:
2568 elif sidedata and not self.hassidedata:
2569 raise error.ProgrammingError(
2569 raise error.ProgrammingError(
2570 _(b"trying to add sidedata to a revlog who don't support them")
2570 _(b"trying to add sidedata to a revlog who don't support them")
2571 )
2571 )
2572
2572
2573 if flags:
2573 if flags:
2574 node = node or self.hash(text, p1, p2)
2574 node = node or self.hash(text, p1, p2)
2575
2575
2576 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2576 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2577
2577
2578 # If the flag processor modifies the revision data, ignore any provided
2578 # If the flag processor modifies the revision data, ignore any provided
2579 # cachedelta.
2579 # cachedelta.
2580 if rawtext != text:
2580 if rawtext != text:
2581 cachedelta = None
2581 cachedelta = None
2582
2582
2583 if len(rawtext) > _maxentrysize:
2583 if len(rawtext) > _maxentrysize:
2584 raise error.RevlogError(
2584 raise error.RevlogError(
2585 _(
2585 _(
2586 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2586 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2587 )
2587 )
2588 % (self.display_id, len(rawtext))
2588 % (self.display_id, len(rawtext))
2589 )
2589 )
2590
2590
2591 node = node or self.hash(rawtext, p1, p2)
2591 node = node or self.hash(rawtext, p1, p2)
2592 rev = self.index.get_rev(node)
2592 rev = self.index.get_rev(node)
2593 if rev is not None:
2593 if rev is not None:
2594 return rev
2594 return rev
2595
2595
2596 if validatehash:
2596 if validatehash:
2597 self.checkhash(rawtext, node, p1=p1, p2=p2)
2597 self.checkhash(rawtext, node, p1=p1, p2=p2)
2598
2598
2599 return self.addrawrevision(
2599 return self.addrawrevision(
2600 rawtext,
2600 rawtext,
2601 transaction,
2601 transaction,
2602 link,
2602 link,
2603 p1,
2603 p1,
2604 p2,
2604 p2,
2605 node,
2605 node,
2606 flags,
2606 flags,
2607 cachedelta=cachedelta,
2607 cachedelta=cachedelta,
2608 deltacomputer=deltacomputer,
2608 deltacomputer=deltacomputer,
2609 sidedata=sidedata,
2609 sidedata=sidedata,
2610 )
2610 )
2611
2611
2612 def addrawrevision(
2612 def addrawrevision(
2613 self,
2613 self,
2614 rawtext,
2614 rawtext,
2615 transaction,
2615 transaction,
2616 link,
2616 link,
2617 p1,
2617 p1,
2618 p2,
2618 p2,
2619 node,
2619 node,
2620 flags,
2620 flags,
2621 cachedelta=None,
2621 cachedelta=None,
2622 deltacomputer=None,
2622 deltacomputer=None,
2623 sidedata=None,
2623 sidedata=None,
2624 ):
2624 ):
2625 """add a raw revision with known flags, node and parents
2625 """add a raw revision with known flags, node and parents
2626 useful when reusing a revision not stored in this revlog (ex: received
2626 useful when reusing a revision not stored in this revlog (ex: received
2627 over wire, or read from an external bundle).
2627 over wire, or read from an external bundle).
2628 """
2628 """
2629 with self._writing(transaction):
2629 with self._writing(transaction):
2630 return self._addrevision(
2630 return self._addrevision(
2631 node,
2631 node,
2632 rawtext,
2632 rawtext,
2633 transaction,
2633 transaction,
2634 link,
2634 link,
2635 p1,
2635 p1,
2636 p2,
2636 p2,
2637 flags,
2637 flags,
2638 cachedelta,
2638 cachedelta,
2639 deltacomputer=deltacomputer,
2639 deltacomputer=deltacomputer,
2640 sidedata=sidedata,
2640 sidedata=sidedata,
2641 )
2641 )
2642
2642
2643 def compress(self, data):
2643 def compress(self, data):
2644 """Generate a possibly-compressed representation of data."""
2644 """Generate a possibly-compressed representation of data."""
2645 if not data:
2645 if not data:
2646 return b'', data
2646 return b'', data
2647
2647
2648 compressed = self._compressor.compress(data)
2648 compressed = self._compressor.compress(data)
2649
2649
2650 if compressed:
2650 if compressed:
2651 # The revlog compressor added the header in the returned data.
2651 # The revlog compressor added the header in the returned data.
2652 return b'', compressed
2652 return b'', compressed
2653
2653
2654 if data[0:1] == b'\0':
2654 if data[0:1] == b'\0':
2655 return b'', data
2655 return b'', data
2656 return b'u', data
2656 return b'u', data
2657
2657
2658 def decompress(self, data):
2658 def decompress(self, data):
2659 """Decompress a revlog chunk.
2659 """Decompress a revlog chunk.
2660
2660
2661 The chunk is expected to begin with a header identifying the
2661 The chunk is expected to begin with a header identifying the
2662 format type so it can be routed to an appropriate decompressor.
2662 format type so it can be routed to an appropriate decompressor.
2663 """
2663 """
2664 if not data:
2664 if not data:
2665 return data
2665 return data
2666
2666
2667 # Revlogs are read much more frequently than they are written and many
2667 # Revlogs are read much more frequently than they are written and many
2668 # chunks only take microseconds to decompress, so performance is
2668 # chunks only take microseconds to decompress, so performance is
2669 # important here.
2669 # important here.
2670 #
2670 #
2671 # We can make a few assumptions about revlogs:
2671 # We can make a few assumptions about revlogs:
2672 #
2672 #
2673 # 1) the majority of chunks will be compressed (as opposed to inline
2673 # 1) the majority of chunks will be compressed (as opposed to inline
2674 # raw data).
2674 # raw data).
2675 # 2) decompressing *any* data will likely by at least 10x slower than
2675 # 2) decompressing *any* data will likely by at least 10x slower than
2676 # returning raw inline data.
2676 # returning raw inline data.
2677 # 3) we want to prioritize common and officially supported compression
2677 # 3) we want to prioritize common and officially supported compression
2678 # engines
2678 # engines
2679 #
2679 #
2680 # It follows that we want to optimize for "decompress compressed data
2680 # It follows that we want to optimize for "decompress compressed data
2681 # when encoded with common and officially supported compression engines"
2681 # when encoded with common and officially supported compression engines"
2682 # case over "raw data" and "data encoded by less common or non-official
2682 # case over "raw data" and "data encoded by less common or non-official
2683 # compression engines." That is why we have the inline lookup first
2683 # compression engines." That is why we have the inline lookup first
2684 # followed by the compengines lookup.
2684 # followed by the compengines lookup.
2685 #
2685 #
2686 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2686 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2687 # compressed chunks. And this matters for changelog and manifest reads.
2687 # compressed chunks. And this matters for changelog and manifest reads.
2688 t = data[0:1]
2688 t = data[0:1]
2689
2689
2690 if t == b'x':
2690 if t == b'x':
2691 try:
2691 try:
2692 return _zlibdecompress(data)
2692 return _zlibdecompress(data)
2693 except zlib.error as e:
2693 except zlib.error as e:
2694 raise error.RevlogError(
2694 raise error.RevlogError(
2695 _(b'revlog decompress error: %s')
2695 _(b'revlog decompress error: %s')
2696 % stringutil.forcebytestr(e)
2696 % stringutil.forcebytestr(e)
2697 )
2697 )
2698 # '\0' is more common than 'u' so it goes first.
2698 # '\0' is more common than 'u' so it goes first.
2699 elif t == b'\0':
2699 elif t == b'\0':
2700 return data
2700 return data
2701 elif t == b'u':
2701 elif t == b'u':
2702 return util.buffer(data, 1)
2702 return util.buffer(data, 1)
2703
2703
2704 compressor = self._get_decompressor(t)
2704 compressor = self._get_decompressor(t)
2705
2705
2706 return compressor.decompress(data)
2706 return compressor.decompress(data)
2707
2707
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        Returns the revision number of the newly added revision.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.

        Must be called inside a `revlog._writing` context (enforced below).
        """
        # Refuse the reserved null and working-directory identifiers.
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        # Single-element mutable container so the text can stay lazy
        # (presumably filled in by the deltacomputer when needed — confirm).
        btext = [rawtext]

        curr = len(self)  # revision number being appended
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh, sdfh = self._writinghandles
            # XXX no checking for the sidedata file
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            write_debug = None
            if self.delta_config.debug_delta:
                write_debug = transaction._report
            deltacomputer = deltautil.deltacomputer(
                self, write_debug=write_debug
            )

        if cachedelta is not None and len(cachedelta) == 2:
            # If the cached delta has no information about how it should be
            # reused, add the default reuse instruction according to the
            # revlog's configuration.
            if (
                self.delta_config.general_delta
                and self.delta_config.lazy_delta_base
            ):
                delta_base_reuse = DELTA_BASE_REUSE_TRY
            else:
                delta_base_reuse = DELTA_BASE_REUSE_NO
            cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)

        revinfo = revlogutils.revisioninfo(
            node,
            p1,
            p2,
            btext,
            textlen,
            cachedelta,
            flags,
        )

        # Pick the delta (or full text) that will actually be stored.
        deltainfo = deltacomputer.finddeltainfo(revinfo)

        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            # docket-based revlogs may record the compression mode out of
            # band instead of inline with each chunk
            default_comp = self._docket.default_compression_header
            r = deltautil.delta_compression(default_comp, deltainfo)
            compression_mode, deltainfo = r

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.hassidedata:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = self._docket.sidedata_end
            h, comp_sidedata = self.compress(serialized_sidedata)
            # only keep the compressed form when it has no explicit header
            # and actually saves space
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        # "rank" looks like the size of the revision's ancestor set
        # (1 for a root, parent rank + 1 otherwise) — TODO confirm exact
        # semantics against changelog-v2 documentation.
        rank = RANK_UNKNOWN
        if self._compute_rank:
            if (p1r, p2r) == (nullrev, nullrev):
                rank = 1
            elif p1r != nullrev and p2r == nullrev:
                rank = 1 + self.fast_rank(p1r)
            elif p1r == nullrev and p2r != nullrev:
                rank = 1 + self.fast_rank(p2r)
            else:  # merge node
                if rustdagop is not None and self.index.rust_ext_compat:
                    rank = rustdagop.rank(self.index, p1r, p2r)
                else:
                    pmin, pmax = sorted((p1r, p2r))
                    rank = 1 + self.fast_rank(pmax)
                    rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))

        e = revlogutils.entry(
            flags=flags,
            data_offset=offset,
            data_compressed_length=deltainfo.deltalen,
            data_uncompressed_length=textlen,
            data_compression_mode=compression_mode,
            data_delta_base=deltainfo.base,
            link_rev=link,
            parent_rev_1=p1r,
            parent_rev_2=p2r,
            node_id=node,
            sidedata_offset=sidedata_offset,
            sidedata_compressed_length=len(serialized_sidedata),
            sidedata_compression_mode=sidedata_compression_mode,
            rank=rank,
        )

        # Append to the in-memory index, then serialize and write the entry.
        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            # the very first entry of a docket-less revlog carries the
            # format header
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
            sidedata_offset,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr
2912
2912
2913 def _get_data_offset(self, prev):
2913 def _get_data_offset(self, prev):
2914 """Returns the current offset in the (in-transaction) data file.
2914 """Returns the current offset in the (in-transaction) data file.
2915 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2915 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2916 file to store that information: since sidedata can be rewritten to the
2916 file to store that information: since sidedata can be rewritten to the
2917 end of the data file within a transaction, you can have cases where, for
2917 end of the data file within a transaction, you can have cases where, for
2918 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2918 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2919 to `n - 1`'s sidedata being written after `n`'s data.
2919 to `n - 1`'s sidedata being written after `n`'s data.
2920
2920
2921 TODO cache this in a docket file before getting out of experimental."""
2921 TODO cache this in a docket file before getting out of experimental."""
2922 if self._docket is None:
2922 if self._docket is None:
2923 return self.end(prev)
2923 return self.end(prev)
2924 else:
2924 else:
2925 return self._docket.data_end
2925 return self._docket.data_end
2926
2926
    def _writeentry(
        self, transaction, entry, data, link, offset, sidedata, sidedata_offset
    ):
        """Write one serialized index entry plus its data and sidedata.

        ``entry`` is the packed index record, ``data`` a (prefix, payload)
        pair for the data file, and ``offset``/``sidedata_offset`` the write
        positions registered with the transaction for rollback.  Must run
        inside a `revlog._writing` context.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        # without a docket, append at EOF; with one, the docket tracks the
        # authoritative end offsets (the file may contain trailing garbage)
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)

        curr = len(self) - 1  # the revision just appended to the index
        if not self._inline:
            # split storage: register rollback points, then write the data
            # payload, sidedata and index entry to their separate files
            transaction.add(self._datafile, offset)
            if self._sidedatafile:
                transaction.add(self._sidedatafile, sidedata_offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
            ifh.write(entry)
        else:
            # inline storage: entry and data interleave in the index file
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            assert not sidedata
            # may migrate the revlog out of inline mode once it grows
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            # revlog-v2 always has 3 writing handles, help Pytype
            wh1 = self._writinghandles[0]
            wh2 = self._writinghandles[1]
            wh3 = self._writinghandles[2]
            assert wh1 is not None
            assert wh2 is not None
            assert wh3 is not None
            self._docket.index_end = wh1.tell()
            self._docket.data_end = wh2.tell()
            self._docket.sidedata_end = wh3.tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)
2991
2991
2992 def addgroup(
2992 def addgroup(
2993 self,
2993 self,
2994 deltas,
2994 deltas,
2995 linkmapper,
2995 linkmapper,
2996 transaction,
2996 transaction,
2997 alwayscache=False,
2997 alwayscache=False,
2998 addrevisioncb=None,
2998 addrevisioncb=None,
2999 duplicaterevisioncb=None,
2999 duplicaterevisioncb=None,
3000 debug_info=None,
3000 debug_info=None,
3001 delta_base_reuse_policy=None,
3001 delta_base_reuse_policy=None,
3002 ):
3002 ):
3003 """
3003 """
3004 add a delta group
3004 add a delta group
3005
3005
3006 given a set of deltas, add them to the revision log. the
3006 given a set of deltas, add them to the revision log. the
3007 first delta is against its parent, which should be in our
3007 first delta is against its parent, which should be in our
3008 log, the rest are against the previous delta.
3008 log, the rest are against the previous delta.
3009
3009
3010 If ``addrevisioncb`` is defined, it will be called with arguments of
3010 If ``addrevisioncb`` is defined, it will be called with arguments of
3011 this revlog and the node that was added.
3011 this revlog and the node that was added.
3012 """
3012 """
3013
3013
3014 if self._adding_group:
3014 if self._adding_group:
3015 raise error.ProgrammingError(b'cannot nest addgroup() calls')
3015 raise error.ProgrammingError(b'cannot nest addgroup() calls')
3016
3016
3017 # read the default delta-base reuse policy from revlog config if the
3017 # read the default delta-base reuse policy from revlog config if the
3018 # group did not specify one.
3018 # group did not specify one.
3019 if delta_base_reuse_policy is None:
3019 if delta_base_reuse_policy is None:
3020 if (
3020 if (
3021 self.delta_config.general_delta
3021 self.delta_config.general_delta
3022 and self.delta_config.lazy_delta_base
3022 and self.delta_config.lazy_delta_base
3023 ):
3023 ):
3024 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
3024 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
3025 else:
3025 else:
3026 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
3026 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
3027
3027
3028 self._adding_group = True
3028 self._adding_group = True
3029 empty = True
3029 empty = True
3030 try:
3030 try:
3031 with self._writing(transaction):
3031 with self._writing(transaction):
3032 write_debug = None
3032 write_debug = None
3033 if self.delta_config.debug_delta:
3033 if self.delta_config.debug_delta:
3034 write_debug = transaction._report
3034 write_debug = transaction._report
3035 deltacomputer = deltautil.deltacomputer(
3035 deltacomputer = deltautil.deltacomputer(
3036 self,
3036 self,
3037 write_debug=write_debug,
3037 write_debug=write_debug,
3038 debug_info=debug_info,
3038 debug_info=debug_info,
3039 )
3039 )
3040 # loop through our set of deltas
3040 # loop through our set of deltas
3041 for data in deltas:
3041 for data in deltas:
3042 (
3042 (
3043 node,
3043 node,
3044 p1,
3044 p1,
3045 p2,
3045 p2,
3046 linknode,
3046 linknode,
3047 deltabase,
3047 deltabase,
3048 delta,
3048 delta,
3049 flags,
3049 flags,
3050 sidedata,
3050 sidedata,
3051 ) = data
3051 ) = data
3052 link = linkmapper(linknode)
3052 link = linkmapper(linknode)
3053 flags = flags or REVIDX_DEFAULT_FLAGS
3053 flags = flags or REVIDX_DEFAULT_FLAGS
3054
3054
3055 rev = self.index.get_rev(node)
3055 rev = self.index.get_rev(node)
3056 if rev is not None:
3056 if rev is not None:
3057 # this can happen if two branches make the same change
3057 # this can happen if two branches make the same change
3058 self._nodeduplicatecallback(transaction, rev)
3058 self._nodeduplicatecallback(transaction, rev)
3059 if duplicaterevisioncb:
3059 if duplicaterevisioncb:
3060 duplicaterevisioncb(self, rev)
3060 duplicaterevisioncb(self, rev)
3061 empty = False
3061 empty = False
3062 continue
3062 continue
3063
3063
3064 for p in (p1, p2):
3064 for p in (p1, p2):
3065 if not self.index.has_node(p):
3065 if not self.index.has_node(p):
3066 raise error.LookupError(
3066 raise error.LookupError(
3067 p, self.radix, _(b'unknown parent')
3067 p, self.radix, _(b'unknown parent')
3068 )
3068 )
3069
3069
3070 if not self.index.has_node(deltabase):
3070 if not self.index.has_node(deltabase):
3071 raise error.LookupError(
3071 raise error.LookupError(
3072 deltabase, self.display_id, _(b'unknown delta base')
3072 deltabase, self.display_id, _(b'unknown delta base')
3073 )
3073 )
3074
3074
3075 baserev = self.rev(deltabase)
3075 baserev = self.rev(deltabase)
3076
3076
3077 if baserev != nullrev and self.iscensored(baserev):
3077 if baserev != nullrev and self.iscensored(baserev):
3078 # if base is censored, delta must be full replacement in a
3078 # if base is censored, delta must be full replacement in a
3079 # single patch operation
3079 # single patch operation
3080 hlen = struct.calcsize(b">lll")
3080 hlen = struct.calcsize(b">lll")
3081 oldlen = self.rawsize(baserev)
3081 oldlen = self.rawsize(baserev)
3082 newlen = len(delta) - hlen
3082 newlen = len(delta) - hlen
3083 if delta[:hlen] != mdiff.replacediffheader(
3083 if delta[:hlen] != mdiff.replacediffheader(
3084 oldlen, newlen
3084 oldlen, newlen
3085 ):
3085 ):
3086 raise error.CensoredBaseError(
3086 raise error.CensoredBaseError(
3087 self.display_id, self.node(baserev)
3087 self.display_id, self.node(baserev)
3088 )
3088 )
3089
3089
3090 if not flags and self._peek_iscensored(baserev, delta):
3090 if not flags and self._peek_iscensored(baserev, delta):
3091 flags |= REVIDX_ISCENSORED
3091 flags |= REVIDX_ISCENSORED
3092
3092
3093 # We assume consumers of addrevisioncb will want to retrieve
3093 # We assume consumers of addrevisioncb will want to retrieve
3094 # the added revision, which will require a call to
3094 # the added revision, which will require a call to
3095 # revision(). revision() will fast path if there is a cache
3095 # revision(). revision() will fast path if there is a cache
3096 # hit. So, we tell _addrevision() to always cache in this case.
3096 # hit. So, we tell _addrevision() to always cache in this case.
3097 # We're only using addgroup() in the context of changegroup
3097 # We're only using addgroup() in the context of changegroup
3098 # generation so the revision data can always be handled as raw
3098 # generation so the revision data can always be handled as raw
3099 # by the flagprocessor.
3099 # by the flagprocessor.
3100 rev = self._addrevision(
3100 rev = self._addrevision(
3101 node,
3101 node,
3102 None,
3102 None,
3103 transaction,
3103 transaction,
3104 link,
3104 link,
3105 p1,
3105 p1,
3106 p2,
3106 p2,
3107 flags,
3107 flags,
3108 (baserev, delta, delta_base_reuse_policy),
3108 (baserev, delta, delta_base_reuse_policy),
3109 alwayscache=alwayscache,
3109 alwayscache=alwayscache,
3110 deltacomputer=deltacomputer,
3110 deltacomputer=deltacomputer,
3111 sidedata=sidedata,
3111 sidedata=sidedata,
3112 )
3112 )
3113
3113
3114 if addrevisioncb:
3114 if addrevisioncb:
3115 addrevisioncb(self, rev)
3115 addrevisioncb(self, rev)
3116 empty = False
3116 empty = False
3117 finally:
3117 finally:
3118 self._adding_group = False
3118 self._adding_group = False
3119 return not empty
3119 return not empty
3120
3120
3121 def iscensored(self, rev):
3121 def iscensored(self, rev):
3122 """Check if a file revision is censored."""
3122 """Check if a file revision is censored."""
3123 if not self.feature_config.censorable:
3123 if not self.feature_config.censorable:
3124 return False
3124 return False
3125
3125
3126 return self.flags(rev) & REVIDX_ISCENSORED
3126 return self.flags(rev) & REVIDX_ISCENSORED
3127
3127
3128 def _peek_iscensored(self, baserev, delta):
3128 def _peek_iscensored(self, baserev, delta):
3129 """Quickly check if a delta produces a censored revision."""
3129 """Quickly check if a delta produces a censored revision."""
3130 if not self.feature_config.censorable:
3130 if not self.feature_config.censorable:
3131 return False
3131 return False
3132
3132
3133 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
3133 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
3134
3134
3135 def getstrippoint(self, minlink):
3135 def getstrippoint(self, minlink):
3136 """find the minimum rev that must be stripped to strip the linkrev
3136 """find the minimum rev that must be stripped to strip the linkrev
3137
3137
3138 Returns a tuple containing the minimum rev and a set of all revs that
3138 Returns a tuple containing the minimum rev and a set of all revs that
3139 have linkrevs that will be broken by this strip.
3139 have linkrevs that will be broken by this strip.
3140 """
3140 """
3141 return storageutil.resolvestripinfo(
3141 return storageutil.resolvestripinfo(
3142 minlink,
3142 minlink,
3143 len(self) - 1,
3143 len(self) - 1,
3144 self.headrevs(),
3144 self.headrevs(),
3145 self.linkrev,
3145 self.linkrev,
3146 self.parentrevs,
3146 self.parentrevs,
3147 )
3147 )
3148
3148
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        # nothing stored, nothing to strip
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        # strip point past the end: nothing to remove
        if rev == len(self):
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            # inline: data is interleaved in the index file, so the index
            # truncation point includes the retained data bytes
            end = data_end + (rev * self.index.entry_size)

        if self._sidedatafile:
            sidedata_end = self.sidedata_cut_off(rev)
            transaction.add(self._sidedatafile, sidedata_end)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could, leverage the docket while stripping. However it is
            # not powerfull enough at the time of this comment
            # NOTE(review): ``sidedata_end`` is only bound when
            # ``self._sidedatafile`` is set; a docket without a sidedata file
            # would raise UnboundLocalError here — presumably a docket always
            # implies a sidedata file, confirm.
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.sidedata_end = sidedata_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()

        del self.index[rev:-1]
3198
3198
3199 def checksize(self):
3199 def checksize(self):
3200 """Check size of index and data files
3200 """Check size of index and data files
3201
3201
3202 return a (dd, di) tuple.
3202 return a (dd, di) tuple.
3203 - dd: extra bytes for the "data" file
3203 - dd: extra bytes for the "data" file
3204 - di: extra bytes for the "index" file
3204 - di: extra bytes for the "index" file
3205
3205
3206 A healthy revlog will return (0, 0).
3206 A healthy revlog will return (0, 0).
3207 """
3207 """
3208 expected = 0
3208 expected = 0
3209 if len(self):
3209 if len(self):
3210 expected = max(0, self.end(len(self) - 1))
3210 expected = max(0, self.end(len(self) - 1))
3211
3211
3212 try:
3212 try:
3213 with self._datafp() as f:
3213 with self._datafp() as f:
3214 f.seek(0, io.SEEK_END)
3214 f.seek(0, io.SEEK_END)
3215 actual = f.tell()
3215 actual = f.tell()
3216 dd = actual - expected
3216 dd = actual - expected
3217 except FileNotFoundError:
3217 except FileNotFoundError:
3218 dd = 0
3218 dd = 0
3219
3219
3220 try:
3220 try:
3221 f = self.opener(self._indexfile)
3221 f = self.opener(self._indexfile)
3222 f.seek(0, io.SEEK_END)
3222 f.seek(0, io.SEEK_END)
3223 actual = f.tell()
3223 actual = f.tell()
3224 f.close()
3224 f.close()
3225 s = self.index.entry_size
3225 s = self.index.entry_size
3226 i = max(0, actual // s)
3226 i = max(0, actual // s)
3227 di = actual - (i * s)
3227 di = actual - (i * s)
3228 if self._inline:
3228 if self._inline:
3229 databytes = 0
3229 databytes = 0
3230 for r in self:
3230 for r in self:
3231 databytes += max(0, self.length(r))
3231 databytes += max(0, self.length(r))
3232 dd = 0
3232 dd = 0
3233 di = actual - len(self) * s - databytes
3233 di = actual - len(self) * s - databytes
3234 except FileNotFoundError:
3234 except FileNotFoundError:
3235 di = 0
3235 di = 0
3236
3236
3237 return (dd, di)
3237 return (dd, di)
3238
3238
3239 def files(self):
3239 def files(self):
3240 res = [self._indexfile]
3240 res = [self._indexfile]
3241 if self._docket_file is None:
3241 if self._docket_file is None:
3242 if not self._inline:
3242 if not self._inline:
3243 res.append(self._datafile)
3243 res.append(self._datafile)
3244 else:
3244 else:
3245 res.append(self._docket_file)
3245 res.append(self._docket_file)
3246 res.extend(self._docket.old_index_filepaths(include_empty=False))
3246 res.extend(self._docket.old_index_filepaths(include_empty=False))
3247 if self._docket.data_end:
3247 if self._docket.data_end:
3248 res.append(self._datafile)
3248 res.append(self._datafile)
3249 res.extend(self._docket.old_data_filepaths(include_empty=False))
3249 res.extend(self._docket.old_data_filepaths(include_empty=False))
3250 if self._docket.sidedata_end:
3250 if self._docket.sidedata_end:
3251 res.append(self._sidedatafile)
3251 res.append(self._sidedatafile)
3252 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
3252 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
3253 return res
3253 return res
3254
3254
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        """Emit ``revlogrevisiondelta`` objects describing ``nodes``.

        Validates ``nodesorder``, adjusts it and ``deltamode`` based on this
        revlog's configuration, then delegates the actual work to
        ``storageutil.emitrevisions`` with this revlog's accessors.
        """
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        # without general delta, default to storage order — presumably
        # because deltas are only valid against adjacent revisions there;
        # confirm against storageutil.emitrevisions.
        if nodesorder is None and not self.delta_config.general_delta:
            nodesorder = b'storage'

        # when delta chains are not stored, fall back to full snapshots
        # (unless the caller explicitly asked for deltas against "prev")
        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self._candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )
3295
3295
    # Delta-reuse policies accepted by ``clone()`` — see its docstring for
    # the exact semantics of each value.
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    # set of all valid values for the ``deltareuse`` argument of ``clone()``
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
3303
3303
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument control how deltas from the existing revlog
        are preserved in the destination revlog. The argument can have the
        following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revision will be re-added as if their were new content. This is
          slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
          eg: large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        # The destination's delta configuration is temporarily replaced by a
        # copy and restored in the ``finally`` below.
        old_delta_config = destrevlog.delta_config
        destrevlog.delta_config = destrevlog.delta_config.copy()

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog.delta_config.lazy_delta_base = True
                destrevlog.delta_config.lazy_delta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog.delta_config.lazy_delta_base = False
                destrevlog.delta_config.lazy_delta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog.delta_config.lazy_delta_base = False
                destrevlog.delta_config.lazy_delta = False
            # DELTAREUSEFULLADD falls through and keeps the destination's own
            # configuration — presumably intentional, since full adds bypass
            # delta reuse entirely.

            delta_both_parents = (
                forcedeltabothparents or old_delta_config.delta_both_parents
            )
            destrevlog.delta_config.delta_both_parents = delta_both_parents

            with self.reading():
                self._clone(
                    tr,
                    destrevlog,
                    addrevisioncb,
                    deltareuse,
                    forcedeltabothparents,
                    sidedata_helpers,
                )

        finally:
            destrevlog.delta_config = old_delta_config
3403
3403
3404 def _clone(
3404 def _clone(
3405 self,
3405 self,
3406 tr,
3406 tr,
3407 destrevlog,
3407 destrevlog,
3408 addrevisioncb,
3408 addrevisioncb,
3409 deltareuse,
3409 deltareuse,
3410 forcedeltabothparents,
3410 forcedeltabothparents,
3411 sidedata_helpers,
3411 sidedata_helpers,
3412 ):
3412 ):
3413 """perform the core duty of `revlog.clone` after parameter processing"""
3413 """perform the core duty of `revlog.clone` after parameter processing"""
3414 write_debug = None
3414 write_debug = None
3415 if self.delta_config.debug_delta:
3415 if self.delta_config.debug_delta:
3416 write_debug = tr._report
3416 write_debug = tr._report
3417 deltacomputer = deltautil.deltacomputer(
3417 deltacomputer = deltautil.deltacomputer(
3418 destrevlog,
3418 destrevlog,
3419 write_debug=write_debug,
3419 write_debug=write_debug,
3420 )
3420 )
3421 index = self.index
3421 index = self.index
3422 for rev in self:
3422 for rev in self:
3423 entry = index[rev]
3423 entry = index[rev]
3424
3424
3425 # Some classes override linkrev to take filtered revs into
3425 # Some classes override linkrev to take filtered revs into
3426 # account. Use raw entry from index.
3426 # account. Use raw entry from index.
3427 flags = entry[0] & 0xFFFF
3427 flags = entry[0] & 0xFFFF
3428 linkrev = entry[4]
3428 linkrev = entry[4]
3429 p1 = index[entry[5]][7]
3429 p1 = index[entry[5]][7]
3430 p2 = index[entry[6]][7]
3430 p2 = index[entry[6]][7]
3431 node = entry[7]
3431 node = entry[7]
3432
3432
3433 # (Possibly) reuse the delta from the revlog if allowed and
3433 # (Possibly) reuse the delta from the revlog if allowed and
3434 # the revlog chunk is a delta.
3434 # the revlog chunk is a delta.
3435 cachedelta = None
3435 cachedelta = None
3436 rawtext = None
3436 rawtext = None
3437 if deltareuse == self.DELTAREUSEFULLADD:
3437 if deltareuse == self.DELTAREUSEFULLADD:
3438 text = self._revisiondata(rev)
3438 text = self._revisiondata(rev)
3439 sidedata = self.sidedata(rev)
3439 sidedata = self.sidedata(rev)
3440
3440
3441 if sidedata_helpers is not None:
3441 if sidedata_helpers is not None:
3442 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3442 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3443 self, sidedata_helpers, sidedata, rev
3443 self, sidedata_helpers, sidedata, rev
3444 )
3444 )
3445 flags = flags | new_flags[0] & ~new_flags[1]
3445 flags = flags | new_flags[0] & ~new_flags[1]
3446
3446
3447 destrevlog.addrevision(
3447 destrevlog.addrevision(
3448 text,
3448 text,
3449 tr,
3449 tr,
3450 linkrev,
3450 linkrev,
3451 p1,
3451 p1,
3452 p2,
3452 p2,
3453 cachedelta=cachedelta,
3453 cachedelta=cachedelta,
3454 node=node,
3454 node=node,
3455 flags=flags,
3455 flags=flags,
3456 deltacomputer=deltacomputer,
3456 deltacomputer=deltacomputer,
3457 sidedata=sidedata,
3457 sidedata=sidedata,
3458 )
3458 )
3459 else:
3459 else:
3460 if destrevlog._lazydelta:
3460 if destrevlog._lazydelta:
3461 dp = self.deltaparent(rev)
3461 dp = self.deltaparent(rev)
3462 if dp != nullrev:
3462 if dp != nullrev:
3463 cachedelta = (dp, bytes(self._chunk(rev)))
3463 cachedelta = (dp, bytes(self._chunk(rev)))
3464
3464
3465 sidedata = None
3465 sidedata = None
3466 if not cachedelta:
3466 if not cachedelta:
3467 rawtext = self._revisiondata(rev)
3467 rawtext = self._revisiondata(rev)
3468 sidedata = self.sidedata(rev)
3468 sidedata = self.sidedata(rev)
3469 if sidedata is None:
3469 if sidedata is None:
3470 sidedata = self.sidedata(rev)
3470 sidedata = self.sidedata(rev)
3471
3471
3472 if sidedata_helpers is not None:
3472 if sidedata_helpers is not None:
3473 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3473 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3474 self, sidedata_helpers, sidedata, rev
3474 self, sidedata_helpers, sidedata, rev
3475 )
3475 )
3476 flags = flags | new_flags[0] & ~new_flags[1]
3476 flags = flags | new_flags[0] & ~new_flags[1]
3477
3477
3478 with destrevlog._writing(tr):
3478 with destrevlog._writing(tr):
3479 destrevlog._addrevision(
3479 destrevlog._addrevision(
3480 node,
3480 node,
3481 rawtext,
3481 rawtext,
3482 tr,
3482 tr,
3483 linkrev,
3483 linkrev,
3484 p1,
3484 p1,
3485 p2,
3485 p2,
3486 flags,
3486 flags,
3487 cachedelta,
3487 cachedelta,
3488 deltacomputer=deltacomputer,
3488 deltacomputer=deltacomputer,
3489 sidedata=sidedata,
3489 sidedata=sidedata,
3490 )
3490 )
3491
3491
3492 if addrevisioncb:
3492 if addrevisioncb:
3493 addrevisioncb(self, rev, node)
3493 addrevisioncb(self, rev, node)
3494
3494
3495 def censorrevision(self, tr, censornode, tombstone=b''):
3495 def censorrevision(self, tr, censornode, tombstone=b''):
3496 if self._format_version == REVLOGV0:
3496 if self._format_version == REVLOGV0:
3497 raise error.RevlogError(
3497 raise error.RevlogError(
3498 _(b'cannot censor with version %d revlogs')
3498 _(b'cannot censor with version %d revlogs')
3499 % self._format_version
3499 % self._format_version
3500 )
3500 )
3501 elif self._format_version == REVLOGV1:
3501 elif self._format_version == REVLOGV1:
3502 rewrite.v1_censor(self, tr, censornode, tombstone)
3502 rewrite.v1_censor(self, tr, censornode, tombstone)
3503 else:
3503 else:
3504 rewrite.v2_censor(self, tr, censornode, tombstone)
3504 rewrite.v2_censor(self, tr, censornode, tombstone)
3505
3505
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.

        ``state`` is a mutable dict shared with the caller; this method
        reads ``expectedversion``, ``skipflags`` and ``erroroncensored``
        from it and (re)initializes ``skipread`` and ``safe_renamed``.
        """
        # file-size sanity checks first
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common  | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1      | L1     | L1    | L1
            # size()       | L1      | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2      | L2     | L2    | L2
            # len(text)    | L2      | L2     | L2    | L3
            # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    # only skip flag checks for flags actually set on this rev
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                # any other failure is reported as a problem rather than
                # aborting the whole verification run
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)
3610
3610
def storageinfo(
    self,
    exclusivefiles=False,
    sharedfiles=False,
    revisionscount=False,
    trackedsize=False,
    storedsize=False,
):
    """Report storage details for this revlog.

    Each boolean flag requests one entry in the returned dict; only the
    requested entries are computed and included.
    """
    info = {}

    if exclusivefiles:
        # The index file is always exclusive to this revlog; a separate
        # data file only exists when the revlog is not stored inline.
        exclusive = [(self.opener, self._indexfile)]
        if not self._inline:
            exclusive.append((self.opener, self._datafile))
        info[b'exclusivefiles'] = exclusive

    if sharedfiles:
        # Revlogs do not share storage files with other stores.
        info[b'sharedfiles'] = []

    if revisionscount:
        info[b'revisionscount'] = len(self)

    if trackedsize:
        # Sum of the raw (uncompressed) size of every revision.
        info[b'trackedsize'] = sum(self.rawsize(rev) for rev in self)

    if storedsize:
        # On-disk footprint: actual size of every file backing this revlog.
        sizes = (self.opener.stat(path).st_size for path in self.files())
        info[b'storedsize'] = sum(sizes)

    return info
3641
3641
def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
    """Regenerate the sidedata of revisions startrev..endrev (inclusive).

    Runs the registered sidedata ``helpers`` on each revision, appends the
    newly serialized sidedata at the end of the sidedata file, then
    rewrites the corresponding index entries to point at the new data.

    Only revisions that currently have NO sidedata may be rewritten;
    otherwise this aborts (rewriting in place would leave garbage behind).
    """
    if not self.hassidedata:
        return
    # revlog formats with sidedata support does not support inline
    assert not self._inline
    if not helpers[1] and not helpers[2]:
        # Nothing to generate or remove
        return

    new_entries = []
    # append the new sidedata
    with self._writing(transaction):
        # handles for the index, data and sidedata files
        ifh, dfh, sdfh = self._writinghandles
        dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

        current_offset = sdfh.tell()
        for rev in range(startrev, endrev + 1):
            entry = self.index[rev]
            new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                store=self,
                sidedata_helpers=helpers,
                sidedata={},
                rev=rev,
            )

            serialized_sidedata = sidedatautil.serialize_sidedata(
                new_sidedata
            )

            # Try to compress the sidedata; fall back to storing it
            # plain when compression does not actually shrink it.
            sidedata_compression_mode = COMP_MODE_INLINE
            if serialized_sidedata and self.hassidedata:
                sidedata_compression_mode = COMP_MODE_PLAIN
                h, comp_sidedata = self.compress(serialized_sidedata)
                # NOTE(review): ``comp_sidedata[0]`` is an ``int`` in
                # Python 3, so comparing it to ``b'\0'`` (and below to the
                # docket's default compression header) looks suspicious —
                # confirm against upstream whether ``[0:1]`` was intended.
                if (
                    h != b'u'
                    and comp_sidedata[0] != b'\0'
                    and len(comp_sidedata) < len(serialized_sidedata)
                ):
                    assert not h
                    if (
                        comp_sidedata[0]
                        == self._docket.default_compression_header
                    ):
                        # compressed with the revlog's default engine:
                        # no per-chunk header needed
                        sidedata_compression_mode = COMP_MODE_DEFAULT
                        serialized_sidedata = comp_sidedata
                    else:
                        # compressed with a non-default engine: the
                        # compression header stays inline with the data
                        sidedata_compression_mode = COMP_MODE_INLINE
                        serialized_sidedata = comp_sidedata
            # entry[8]/entry[9] are the existing sidedata offset/length
            if entry[8] != 0 or entry[9] != 0:
                # rewriting entries that already have sidedata is not
                # supported yet, because it introduces garbage data in the
                # revlog.
                msg = b"rewriting existing sidedata is not supported yet"
                raise error.Abort(msg)

            # Apply (potential) flags to add and to remove after running
            # the sidedata helpers
            new_offset_flags = entry[0] | flags[0] & ~flags[1]
            entry_update = (
                current_offset,
                len(serialized_sidedata),
                new_offset_flags,
                sidedata_compression_mode,
            )

            # the sidedata computation might have move the file cursors around
            sdfh.seek(current_offset, os.SEEK_SET)
            sdfh.write(serialized_sidedata)
            new_entries.append(entry_update)
            current_offset += len(serialized_sidedata)
        self._docket.sidedata_end = sdfh.tell()

        # rewrite the new index entries
        ifh.seek(startrev * self.index.entry_size)
        for i, e in enumerate(new_entries):
            rev = startrev + i
            self.index.replace_sidedata_info(rev, *e)
            packed = self.index.entry_binary(rev)
            if rev == 0 and self._docket is None:
                # legacy (docket-less) format: the revlog header is packed
                # into the first index entry
                header = self._format_flags | self._format_version
                header = self.index.pack_header(header)
                packed = header + packed
            ifh.write(packed)
General Comments 0
You need to be logged in to leave comments. Login now