cleanup: mark some ui.(status|note|warn|write) calls as not needing i18n...
Augie Fackler
r43350:86e4daa2 default

The requested changes are too big and content was truncated.
@@ -1,65 +1,65 @@
# debugshell extension
"""a python shell with repo, changelog & manifest objects"""

from __future__ import absolute_import
import code
import mercurial
import sys
from mercurial import (
    demandimport,
    pycompat,
    registrar,
)

cmdtable = {}
command = registrar.command(cmdtable)


def pdb(ui, repo, msg, **opts):
    objects = {
        'mercurial': mercurial,
        'repo': repo,
        'cl': repo.changelog,
        'mf': repo.manifestlog,
    }

    code.interact(msg, local=objects)


def ipdb(ui, repo, msg, **opts):
    import IPython

    cl = repo.changelog
    mf = repo.manifestlog
    cl, mf  # use variables to appease pyflakes

    IPython.embed()


@command(b'debugshell|dbsh', [])
def debugshell(ui, repo, **opts):
    bannermsg = "loaded repo : %s\n" "using source: %s" % (
        pycompat.sysstr(repo.root),
        mercurial.__path__[0],
    )

    pdbmap = {'pdb': 'code', 'ipdb': 'IPython'}

    debugger = ui.config(b"ui", b"debugger")
    if not debugger:
        debugger = 'pdb'
    else:
        debugger = pycompat.sysstr(debugger)

    # if IPython doesn't exist, fall back to code.interact
    try:
        with demandimport.deactivated():
            __import__(pdbmap[debugger])
    except ImportError:
-        ui.warn(
+        ui.warnnoi18n(
            b"%s debugger specified but %s module was not found\n"
            % (debugger, pdbmap[debugger])
        )
        debugger = b'pdb'

    getattr(sys.modules[__name__], debugger)(ui, repo, bannermsg, **opts)
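
The only change in this file is the `ui.warn` to `ui.warnnoi18n` swap in the except branch above: the message interpolates module names for developers, so it is deliberately left untranslated. A minimal sketch of the alias pattern presumably behind the `*noi18n` names (the `exampleui` class here is hypothetical, not Mercurial's actual `ui`):

import sys


class exampleui(object):
    """Sketch of the *noi18n alias pattern (illustrative, not hg's code)."""

    def warn(self, msg):
        # the normal channel; code checkers expect _()-wrapped strings here
        sys.stderr.write(msg)

    # identical behavior under a distinct name, marking the output as
    # deliberately untranslated so i18n linting can skip it
    warnnoi18n = warn


exampleui().warnnoi18n("ipdb debugger specified but IPython was not found\n")

Because the alias shares the implementation, the swap changes nothing at runtime; it only records intent for the string-checking tooling.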
@@ -1,3744 +1,3744 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds, and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
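
# A hypothetical hgrc sketch exercising the options documented above; the
# option names come from the docstring, while the path and values are
# illustrative only:
#
#     [extensions]
#     perf = /path/to/contrib/perf.py
#
#     [perf]
#     all-timing = yes
#     presleep = 0
#     run-limits = 5.0-50, 15.0-5
#
# With that in place, a command such as `hg perfheads` reports best, max,
# avg, and median timings instead of only the best run.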

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop(object):
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
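
# A minimal usage sketch for timeone() (illustrative, not part of the file):
# the yielded list receives a single (wall, cpu-user, cpu-sys) tuple once
# the with-block finishes.
#
#     with timeone() as res:
#         pass  # code under measurement
#     wall, user, system = res[0]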


# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
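
# Worked example of the stop conditions above (illustrative): with runs of
# ~10ms each, the (3.0, 100) rule fires first, stopping after ~3s elapsed
# and ~300 runs; with runs of ~4s each, that rule never reaches 100 runs,
# so the (10.0, 3) rule stops the benchmark after 3 runs (~12s elapsed).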


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking future removal of an attribute, which
    would break the assumptions of the performance measurement.

    This function returns an object that can (1) assign a new value to
    the attribute and (2) restore the attribute's original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful for
    examining an attribute that isn't present in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
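
# A minimal set/restore round-trip sketch (illustrative; gettimer() above
# uses the .set() half to point ui.fout at ui.ferr):
#
#     fout = safeattrsetter(ui, b'fout', ignoremissing=True)
#     if fout:
#         fout.set(ui.ferr)   # redirect output for the benchmark
#         try:
#             pass            # run measurements
#         finally:
#             fout.restore()  # put the original stream back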


# utilities to examine each internal API change


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()


@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()


@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()


@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()


@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()


@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
1062 """
1063 opts = _byteskwargs(opts)
1063 opts = _byteskwargs(opts)
1064 cl = repo.changelog
1064 cl = repo.changelog
1065 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1065 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1066 bundler = changegroup.getbundler(cgversion, repo)
1066 bundler = changegroup.getbundler(cgversion, repo)
1067
1067
1068 def d():
1068 def d():
1069 state, chunks = bundler._generatechangelog(cl, nodes)
1069 state, chunks = bundler._generatechangelog(cl, nodes)
1070 for chunk in chunks:
1070 for chunk in chunks:
1071 pass
1071 pass
1072
1072
1073 timer, fm = gettimer(ui, opts)
1073 timer, fm = gettimer(ui, opts)
1074
1074
1075 # Terminal printing can interfere with timing. So disable it.
1075 # Terminal printing can interfere with timing. So disable it.
1076 with ui.configoverride({(b'progress', b'disable'): True}):
1076 with ui.configoverride({(b'progress', b'disable'): True}):
1077 timer(d)
1077 timer(d)
1078
1078
1079 fm.end()
1079 fm.end()
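For reference, a minimal sketch of the operation this command times, assuming an in-process `repo` object (the function body above is the authoritative version):

    from mercurial import changegroup

    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(b'all()')]
    bundler = changegroup.getbundler(b'02', repo)
    state, chunks = bundler._generatechangelog(cl, nodes)
    for chunk in chunks:
        pass  # exhausting the generator is the measured work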
1080
1080
1081
1081
1082 @command(b'perfdirs', formatteropts)
1082 @command(b'perfdirs', formatteropts)
1083 def perfdirs(ui, repo, **opts):
1083 def perfdirs(ui, repo, **opts):
1084 opts = _byteskwargs(opts)
1084 opts = _byteskwargs(opts)
1085 timer, fm = gettimer(ui, opts)
1085 timer, fm = gettimer(ui, opts)
1086 dirstate = repo.dirstate
1086 dirstate = repo.dirstate
1087 b'a' in dirstate
1087 b'a' in dirstate
1088
1088
1089 def d():
1089 def d():
1090 dirstate.hasdir(b'a')
1090 dirstate.hasdir(b'a')
1091 del dirstate._map._dirs
1091 del dirstate._map._dirs
1092
1092
1093 timer(d)
1093 timer(d)
1094 fm.end()
1094 fm.end()
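The `del dirstate._map._dirs` statement drops a lazily computed attribute so that every timed run recomputes it. A minimal sketch of that caching pattern, using the real `util.propertycache` decorator on a hypothetical class:

    from mercurial import util

    class demo(object):
        @util.propertycache
        def _dirs(self):
            # expensive scan; the result is cached on the instance
            return set()

    d = demo()
    d._dirs       # first access computes and caches the value
    del d._dirs   # deleting the cached attribute forces recomputation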
1095
1095
1096
1096
1097 @command(b'perfdirstate', formatteropts)
1097 @command(b'perfdirstate', formatteropts)
1098 def perfdirstate(ui, repo, **opts):
1098 def perfdirstate(ui, repo, **opts):
1099 opts = _byteskwargs(opts)
1099 opts = _byteskwargs(opts)
1100 timer, fm = gettimer(ui, opts)
1100 timer, fm = gettimer(ui, opts)
1101 b"a" in repo.dirstate
1101 b"a" in repo.dirstate
1102
1102
1103 def d():
1103 def d():
1104 repo.dirstate.invalidate()
1104 repo.dirstate.invalidate()
1105 b"a" in repo.dirstate
1105 b"a" in repo.dirstate
1106
1106
1107 timer(d)
1107 timer(d)
1108 fm.end()
1108 fm.end()
1109
1109
1110
1110
1111 @command(b'perfdirstatedirs', formatteropts)
1111 @command(b'perfdirstatedirs', formatteropts)
1112 def perfdirstatedirs(ui, repo, **opts):
1112 def perfdirstatedirs(ui, repo, **opts):
1113 opts = _byteskwargs(opts)
1113 opts = _byteskwargs(opts)
1114 timer, fm = gettimer(ui, opts)
1114 timer, fm = gettimer(ui, opts)
1115 b"a" in repo.dirstate
1115 b"a" in repo.dirstate
1116
1116
1117 def d():
1117 def d():
1118 repo.dirstate.hasdir(b"a")
1118 repo.dirstate.hasdir(b"a")
1119 del repo.dirstate._map._dirs
1119 del repo.dirstate._map._dirs
1120
1120
1121 timer(d)
1121 timer(d)
1122 fm.end()
1122 fm.end()
1123
1123
1124
1124
1125 @command(b'perfdirstatefoldmap', formatteropts)
1125 @command(b'perfdirstatefoldmap', formatteropts)
1126 def perfdirstatefoldmap(ui, repo, **opts):
1126 def perfdirstatefoldmap(ui, repo, **opts):
1127 opts = _byteskwargs(opts)
1127 opts = _byteskwargs(opts)
1128 timer, fm = gettimer(ui, opts)
1128 timer, fm = gettimer(ui, opts)
1129 dirstate = repo.dirstate
1129 dirstate = repo.dirstate
1130 b'a' in dirstate
1130 b'a' in dirstate
1131
1131
1132 def d():
1132 def d():
1133 dirstate._map.filefoldmap.get(b'a')
1133 dirstate._map.filefoldmap.get(b'a')
1134 del dirstate._map.filefoldmap
1134 del dirstate._map.filefoldmap
1135
1135
1136 timer(d)
1136 timer(d)
1137 fm.end()
1137 fm.end()
1138
1138
1139
1139
1140 @command(b'perfdirfoldmap', formatteropts)
1140 @command(b'perfdirfoldmap', formatteropts)
1141 def perfdirfoldmap(ui, repo, **opts):
1141 def perfdirfoldmap(ui, repo, **opts):
1142 opts = _byteskwargs(opts)
1142 opts = _byteskwargs(opts)
1143 timer, fm = gettimer(ui, opts)
1143 timer, fm = gettimer(ui, opts)
1144 dirstate = repo.dirstate
1144 dirstate = repo.dirstate
1145 b'a' in dirstate
1145 b'a' in dirstate
1146
1146
1147 def d():
1147 def d():
1148 dirstate._map.dirfoldmap.get(b'a')
1148 dirstate._map.dirfoldmap.get(b'a')
1149 del dirstate._map.dirfoldmap
1149 del dirstate._map.dirfoldmap
1150 del dirstate._map._dirs
1150 del dirstate._map._dirs
1151
1151
1152 timer(d)
1152 timer(d)
1153 fm.end()
1153 fm.end()
1154
1154
1155
1155
1156 @command(b'perfdirstatewrite', formatteropts)
1156 @command(b'perfdirstatewrite', formatteropts)
1157 def perfdirstatewrite(ui, repo, **opts):
1157 def perfdirstatewrite(ui, repo, **opts):
1158 opts = _byteskwargs(opts)
1158 opts = _byteskwargs(opts)
1159 timer, fm = gettimer(ui, opts)
1159 timer, fm = gettimer(ui, opts)
1160 ds = repo.dirstate
1160 ds = repo.dirstate
1161 b"a" in ds
1161 b"a" in ds
1162
1162
1163 def d():
1163 def d():
1164 ds._dirty = True
1164 ds._dirty = True
1165 ds.write(repo.currenttransaction())
1165 ds.write(repo.currenttransaction())
1166
1166
1167 timer(d)
1167 timer(d)
1168 fm.end()
1168 fm.end()
1169
1169
1170
1170
1171 def _getmergerevs(repo, opts):
1171 def _getmergerevs(repo, opts):
1172 """parse command argument to return rev involved in merge
1172 """parse command argument to return rev involved in merge
1173
1173
1174 input: options dictionary with `rev`, `from` and `base`
1174 input: options dictionary with `rev`, `from` and `base`
1175 output: (localctx, otherctx, basectx)
1175 output: (localctx, otherctx, basectx)
1176 """
1176 """
1177 if opts[b'from']:
1177 if opts[b'from']:
1178 fromrev = scmutil.revsingle(repo, opts[b'from'])
1178 fromrev = scmutil.revsingle(repo, opts[b'from'])
1179 wctx = repo[fromrev]
1179 wctx = repo[fromrev]
1180 else:
1180 else:
1181 wctx = repo[None]
1181 wctx = repo[None]
1182 # we don't want working dir files to be stat'd in the benchmark, so
1182 # we don't want working dir files to be stat'd in the benchmark, so
1183 # prime that cache
1183 # prime that cache
1184 wctx.dirty()
1184 wctx.dirty()
1185 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1185 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1186 if opts[b'base']:
1186 if opts[b'base']:
1187 fromrev = scmutil.revsingle(repo, opts[b'base'])
1187 fromrev = scmutil.revsingle(repo, opts[b'base'])
1188 ancestor = repo[fromrev]
1188 ancestor = repo[fromrev]
1189 else:
1189 else:
1190 ancestor = wctx.ancestor(rctx)
1190 ancestor = wctx.ancestor(rctx)
1191 return (wctx, rctx, ancestor)
1191 return (wctx, rctx, ancestor)
1192
1192
1193
1193
1194 @command(
1194 @command(
1195 b'perfmergecalculate',
1195 b'perfmergecalculate',
1196 [
1196 [
1197 (b'r', b'rev', b'.', b'rev to merge against'),
1197 (b'r', b'rev', b'.', b'rev to merge against'),
1198 (b'', b'from', b'', b'rev to merge from'),
1198 (b'', b'from', b'', b'rev to merge from'),
1199 (b'', b'base', b'', b'the revision to use as base'),
1199 (b'', b'base', b'', b'the revision to use as base'),
1200 ]
1200 ]
1201 + formatteropts,
1201 + formatteropts,
1202 )
1202 )
1203 def perfmergecalculate(ui, repo, **opts):
1203 def perfmergecalculate(ui, repo, **opts):
1204 opts = _byteskwargs(opts)
1204 opts = _byteskwargs(opts)
1205 timer, fm = gettimer(ui, opts)
1205 timer, fm = gettimer(ui, opts)
1206
1206
1207 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1207 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1208
1208
1209 def d():
1209 def d():
1210 # acceptremote is True because we don't want prompts in the middle of
1210 # acceptremote is True because we don't want prompts in the middle of
1211 # our benchmark
1211 # our benchmark
1212 merge.calculateupdates(
1212 merge.calculateupdates(
1213 repo,
1213 repo,
1214 wctx,
1214 wctx,
1215 rctx,
1215 rctx,
1216 [ancestor],
1216 [ancestor],
1217 branchmerge=False,
1217 branchmerge=False,
1218 force=False,
1218 force=False,
1219 acceptremote=True,
1219 acceptremote=True,
1220 followcopies=True,
1220 followcopies=True,
1221 )
1221 )
1222
1222
1223 timer(d)
1223 timer(d)
1224 fm.end()
1224 fm.end()
1225
1225
1226
1226
1227 @command(
1227 @command(
1228 b'perfmergecopies',
1228 b'perfmergecopies',
1229 [
1229 [
1230 (b'r', b'rev', b'.', b'rev to merge against'),
1230 (b'r', b'rev', b'.', b'rev to merge against'),
1231 (b'', b'from', b'', b'rev to merge from'),
1231 (b'', b'from', b'', b'rev to merge from'),
1232 (b'', b'base', b'', b'the revision to use as base'),
1232 (b'', b'base', b'', b'the revision to use as base'),
1233 ]
1233 ]
1234 + formatteropts,
1234 + formatteropts,
1235 )
1235 )
1236 def perfmergecopies(ui, repo, **opts):
1236 def perfmergecopies(ui, repo, **opts):
1237 """measure runtime of `copies.mergecopies`"""
1237 """measure runtime of `copies.mergecopies`"""
1238 opts = _byteskwargs(opts)
1238 opts = _byteskwargs(opts)
1239 timer, fm = gettimer(ui, opts)
1239 timer, fm = gettimer(ui, opts)
1240 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1240 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1241
1241
1242 def d():
1242 def d():
1243 # acceptremote is True because we don't want prompts in the middle of
1243 # acceptremote is True because we don't want prompts in the middle of
1244 # our benchmark
1244 # our benchmark
1245 copies.mergecopies(repo, wctx, rctx, ancestor)
1245 copies.mergecopies(repo, wctx, rctx, ancestor)
1246
1246
1247 timer(d)
1247 timer(d)
1248 fm.end()
1248 fm.end()
1249
1249
1250
1250
1251 @command(b'perfpathcopies', [], b"REV REV")
1251 @command(b'perfpathcopies', [], b"REV REV")
1252 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1252 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1253 """benchmark the copy tracing logic"""
1253 """benchmark the copy tracing logic"""
1254 opts = _byteskwargs(opts)
1254 opts = _byteskwargs(opts)
1255 timer, fm = gettimer(ui, opts)
1255 timer, fm = gettimer(ui, opts)
1256 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1256 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1257 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1257 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1258
1258
1259 def d():
1259 def d():
1260 copies.pathcopies(ctx1, ctx2)
1260 copies.pathcopies(ctx1, ctx2)
1261
1261
1262 timer(d)
1262 timer(d)
1263 fm.end()
1263 fm.end()
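A minimal usage sketch of the call being timed, assuming an in-process `repo`; `pathcopies` returns a mapping of copies/renames between the two contexts (destination path keyed to source path):

    from mercurial import copies, scmutil

    ctx1 = scmutil.revsingle(repo, b'.^')
    ctx2 = scmutil.revsingle(repo, b'.')
    renamemap = copies.pathcopies(ctx1, ctx2)  # {dest path: source path}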
1264
1264
1265
1265
1266 @command(
1266 @command(
1267 b'perfphases',
1267 b'perfphases',
1268 [(b'', b'full', False, b'include file reading time too'),],
1268 [(b'', b'full', False, b'include file reading time too'),],
1269 b"",
1269 b"",
1270 )
1270 )
1271 def perfphases(ui, repo, **opts):
1271 def perfphases(ui, repo, **opts):
1272 """benchmark phasesets computation"""
1272 """benchmark phasesets computation"""
1273 opts = _byteskwargs(opts)
1273 opts = _byteskwargs(opts)
1274 timer, fm = gettimer(ui, opts)
1274 timer, fm = gettimer(ui, opts)
1275 _phases = repo._phasecache
1275 _phases = repo._phasecache
1276 full = opts.get(b'full')
1276 full = opts.get(b'full')
1277
1277
1278 def d():
1278 def d():
1279 phases = _phases
1279 phases = _phases
1280 if full:
1280 if full:
1281 clearfilecache(repo, b'_phasecache')
1281 clearfilecache(repo, b'_phasecache')
1282 phases = repo._phasecache
1282 phases = repo._phasecache
1283 phases.invalidate()
1283 phases.invalidate()
1284 phases.loadphaserevs(repo)
1284 phases.loadphaserevs(repo)
1285
1285
1286 timer(d)
1286 timer(d)
1287 fm.end()
1287 fm.end()
1288
1288
1289
1289
1290 @command(b'perfphasesremote', [], b"[DEST]")
1290 @command(b'perfphasesremote', [], b"[DEST]")
1291 def perfphasesremote(ui, repo, dest=None, **opts):
1291 def perfphasesremote(ui, repo, dest=None, **opts):
1292 """benchmark time needed to analyse phases of the remote server"""
1292 """benchmark time needed to analyse phases of the remote server"""
1293 from mercurial.node import bin
1293 from mercurial.node import bin
1294 from mercurial import (
1294 from mercurial import (
1295 exchange,
1295 exchange,
1296 hg,
1296 hg,
1297 phases,
1297 phases,
1298 )
1298 )
1299
1299
1300 opts = _byteskwargs(opts)
1300 opts = _byteskwargs(opts)
1301 timer, fm = gettimer(ui, opts)
1301 timer, fm = gettimer(ui, opts)
1302
1302
1303 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1303 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1304 if not path:
1304 if not path:
1305 raise error.Abort(
1305 raise error.Abort(
1306 b'default repository not configured!',
1306 b'default repository not configured!',
1307 hint=b"see 'hg help config.paths'",
1307 hint=b"see 'hg help config.paths'",
1308 )
1308 )
1309 dest = path.pushloc or path.loc
1309 dest = path.pushloc or path.loc
1310 ui.status(b'analysing phase of %s\n' % util.hidepassword(dest))
1310 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1311 other = hg.peer(repo, opts, dest)
1311 other = hg.peer(repo, opts, dest)
1312
1312
1313 # easier to perform discovery through the operation
1313 # easier to perform discovery through the operation
1314 op = exchange.pushoperation(repo, other)
1314 op = exchange.pushoperation(repo, other)
1315 exchange._pushdiscoverychangeset(op)
1315 exchange._pushdiscoverychangeset(op)
1316
1316
1317 remotesubset = op.fallbackheads
1317 remotesubset = op.fallbackheads
1318
1318
1319 with other.commandexecutor() as e:
1319 with other.commandexecutor() as e:
1320 remotephases = e.callcommand(
1320 remotephases = e.callcommand(
1321 b'listkeys', {b'namespace': b'phases'}
1321 b'listkeys', {b'namespace': b'phases'}
1322 ).result()
1322 ).result()
1323 del other
1323 del other
1324 publishing = remotephases.get(b'publishing', False)
1324 publishing = remotephases.get(b'publishing', False)
1325 if publishing:
1325 if publishing:
1326 ui.status(b'publishing: yes\n')
1326 ui.statusnoi18n(b'publishing: yes\n')
1327 else:
1327 else:
1328 ui.status(b'publishing: no\n')
1328 ui.statusnoi18n(b'publishing: no\n')
1329
1329
1330 nodemap = repo.changelog.nodemap
1330 nodemap = repo.changelog.nodemap
1331 nonpublishroots = 0
1331 nonpublishroots = 0
1332 for nhex, phase in remotephases.iteritems():
1332 for nhex, phase in remotephases.iteritems():
1333 if nhex == b'publishing': # ignore data related to publish option
1333 if nhex == b'publishing': # ignore data related to publish option
1334 continue
1334 continue
1335 node = bin(nhex)
1335 node = bin(nhex)
1336 if node in nodemap and int(phase):
1336 if node in nodemap and int(phase):
1337 nonpublishroots += 1
1337 nonpublishroots += 1
1338 ui.status(b'number of roots: %d\n' % len(remotephases))
1338 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1339 ui.status(b'number of known non public roots: %d\n' % nonpublishroots)
1339 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1340
1340
1341 def d():
1341 def d():
1342 phases.remotephasessummary(repo, remotesubset, remotephases)
1342 phases.remotephasessummary(repo, remotesubset, remotephases)
1343
1343
1344 timer(d)
1344 timer(d)
1345 fm.end()
1345 fm.end()
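The `ui.statusnoi18n` calls above are the point of this changeset: perf and debug output is never translated, so these call sites are marked explicitly instead of being wrapped in `_()`. A minimal sketch of the distinction, assuming a `ui` object:

    from mercurial.i18n import _

    ui.status(_(b'user-facing message, subject to translation\n'))
    ui.statusnoi18n(b'perf output, deliberately left untranslated\n')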
1346
1346
1347
1347
1348 @command(
1348 @command(
1349 b'perfmanifest',
1349 b'perfmanifest',
1350 [
1350 [
1351 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1351 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1352 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1352 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1353 ]
1353 ]
1354 + formatteropts,
1354 + formatteropts,
1355 b'REV|NODE',
1355 b'REV|NODE',
1356 )
1356 )
1357 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1357 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1358 """benchmark the time to read a manifest from disk and return a usable
1358 """benchmark the time to read a manifest from disk and return a usable
1359 dict-like object
1359 dict-like object
1360
1360
1361 Manifest caches are cleared before retrieval."""
1361 Manifest caches are cleared before retrieval."""
1362 opts = _byteskwargs(opts)
1362 opts = _byteskwargs(opts)
1363 timer, fm = gettimer(ui, opts)
1363 timer, fm = gettimer(ui, opts)
1364 if not manifest_rev:
1364 if not manifest_rev:
1365 ctx = scmutil.revsingle(repo, rev, rev)
1365 ctx = scmutil.revsingle(repo, rev, rev)
1366 t = ctx.manifestnode()
1366 t = ctx.manifestnode()
1367 else:
1367 else:
1368 from mercurial.node import bin
1368 from mercurial.node import bin
1369
1369
1370 if len(rev) == 40:
1370 if len(rev) == 40:
1371 t = bin(rev)
1371 t = bin(rev)
1372 else:
1372 else:
1373 try:
1373 try:
1374 rev = int(rev)
1374 rev = int(rev)
1375
1375
1376 if util.safehasattr(repo.manifestlog, b'getstorage'):
1376 if util.safehasattr(repo.manifestlog, b'getstorage'):
1377 t = repo.manifestlog.getstorage(b'').node(rev)
1377 t = repo.manifestlog.getstorage(b'').node(rev)
1378 else:
1378 else:
1379 t = repo.manifestlog._revlog.lookup(rev)
1379 t = repo.manifestlog._revlog.lookup(rev)
1380 except ValueError:
1380 except ValueError:
1381 raise error.Abort(
1381 raise error.Abort(
1382 b'manifest revision must be integer or full ' b'node'
1382 b'manifest revision must be integer or full ' b'node'
1383 )
1383 )
1384
1384
1385 def d():
1385 def d():
1386 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1386 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1387 repo.manifestlog[t].read()
1387 repo.manifestlog[t].read()
1388
1388
1389 timer(d)
1389 timer(d)
1390 fm.end()
1390 fm.end()
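A minimal sketch, assuming an in-process `repo`, of what the benchmarked body does: drop the manifest caches, then materialize one manifest as a dict-like object:

    node = repo[b'tip'].manifestnode()
    repo.manifestlog.clearcaches(clear_persisted_data=False)
    manifest = repo.manifestlog[node].read()  # maps file path -> file node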
1391
1391
1392
1392
1393 @command(b'perfchangeset', formatteropts)
1393 @command(b'perfchangeset', formatteropts)
1394 def perfchangeset(ui, repo, rev, **opts):
1394 def perfchangeset(ui, repo, rev, **opts):
1395 opts = _byteskwargs(opts)
1395 opts = _byteskwargs(opts)
1396 timer, fm = gettimer(ui, opts)
1396 timer, fm = gettimer(ui, opts)
1397 n = scmutil.revsingle(repo, rev).node()
1397 n = scmutil.revsingle(repo, rev).node()
1398
1398
1399 def d():
1399 def d():
1400 repo.changelog.read(n)
1400 repo.changelog.read(n)
1401 # repo.changelog._cache = None
1401 # repo.changelog._cache = None
1402
1402
1403 timer(d)
1403 timer(d)
1404 fm.end()
1404 fm.end()
1405
1405
1406
1406
1407 @command(b'perfignore', formatteropts)
1407 @command(b'perfignore', formatteropts)
1408 def perfignore(ui, repo, **opts):
1408 def perfignore(ui, repo, **opts):
1409 """benchmark operation related to computing ignore"""
1409 """benchmark operation related to computing ignore"""
1410 opts = _byteskwargs(opts)
1410 opts = _byteskwargs(opts)
1411 timer, fm = gettimer(ui, opts)
1411 timer, fm = gettimer(ui, opts)
1412 dirstate = repo.dirstate
1412 dirstate = repo.dirstate
1413
1413
1414 def setupone():
1414 def setupone():
1415 dirstate.invalidate()
1415 dirstate.invalidate()
1416 clearfilecache(dirstate, b'_ignore')
1416 clearfilecache(dirstate, b'_ignore')
1417
1417
1418 def runone():
1418 def runone():
1419 dirstate._ignore
1419 dirstate._ignore
1420
1420
1421 timer(runone, setup=setupone, title=b"load")
1421 timer(runone, setup=setupone, title=b"load")
1422 fm.end()
1422 fm.end()
1423
1423
1424
1424
1425 @command(
1425 @command(
1426 b'perfindex',
1426 b'perfindex',
1427 [
1427 [
1428 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1428 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1429 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1429 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1430 ]
1430 ]
1431 + formatteropts,
1431 + formatteropts,
1432 )
1432 )
1433 def perfindex(ui, repo, **opts):
1433 def perfindex(ui, repo, **opts):
1434 """benchmark index creation time followed by a lookup
1434 """benchmark index creation time followed by a lookup
1435
1435
1436 The default is to look `tip` up. Depending on the index implementation,
1436 The default is to look `tip` up. Depending on the index implementation,
1437 the revision looked up can matter. For example, an implementation
1437 the revision looked up can matter. For example, an implementation
1438 scanning the index will have a faster lookup time for `--rev tip` than for
1438 scanning the index will have a faster lookup time for `--rev tip` than for
1439 `--rev 0`. The number of looked up revisions and their order can also
1439 `--rev 0`. The number of looked up revisions and their order can also
1440 matter.
1440 matter.
1441
1441
1442 Examples of useful sets to test:
1442 Examples of useful sets to test:
1443 * tip
1443 * tip
1444 * 0
1444 * 0
1445 * -10:
1445 * -10:
1446 * :10
1446 * :10
1447 * -10: + :10
1447 * -10: + :10
1448 * :10: + -10:
1448 * :10: + -10:
1449 * -10000:
1449 * -10000:
1450 * -10000: + 0
1450 * -10000: + 0
1451
1451
1452 It is not currently possible to check for lookup of a missing node. For
1452 It is not currently possible to check for lookup of a missing node. For
1453 deeper lookup benchmarking, check out the `perfnodemap` command."""
1453 deeper lookup benchmarking, check out the `perfnodemap` command."""
1454 import mercurial.revlog
1454 import mercurial.revlog
1455
1455
1456 opts = _byteskwargs(opts)
1456 opts = _byteskwargs(opts)
1457 timer, fm = gettimer(ui, opts)
1457 timer, fm = gettimer(ui, opts)
1458 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1458 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1459 if opts[b'no_lookup']:
1459 if opts[b'no_lookup']:
1460 if opts['rev']:
1460 if opts['rev']:
1461 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1461 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1462 nodes = []
1462 nodes = []
1463 elif not opts[b'rev']:
1463 elif not opts[b'rev']:
1464 nodes = [repo[b"tip"].node()]
1464 nodes = [repo[b"tip"].node()]
1465 else:
1465 else:
1466 revs = scmutil.revrange(repo, opts[b'rev'])
1466 revs = scmutil.revrange(repo, opts[b'rev'])
1467 cl = repo.changelog
1467 cl = repo.changelog
1468 nodes = [cl.node(r) for r in revs]
1468 nodes = [cl.node(r) for r in revs]
1469
1469
1470 unfi = repo.unfiltered()
1470 unfi = repo.unfiltered()
1471 # find the filecache func directly
1471 # find the filecache func directly
1472 # This avoids polluting the benchmark with the filecache logic
1472 # This avoids polluting the benchmark with the filecache logic
1473 makecl = unfi.__class__.changelog.func
1473 makecl = unfi.__class__.changelog.func
1474
1474
1475 def setup():
1475 def setup():
1476 # probably not necessary, but for good measure
1476 # probably not necessary, but for good measure
1477 clearchangelog(unfi)
1477 clearchangelog(unfi)
1478
1478
1479 def d():
1479 def d():
1480 cl = makecl(unfi)
1480 cl = makecl(unfi)
1481 for n in nodes:
1481 for n in nodes:
1482 cl.rev(n)
1482 cl.rev(n)
1483
1483
1484 timer(d, setup=setup)
1484 timer(d, setup=setup)
1485 fm.end()
1485 fm.end()
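A minimal sketch of the lookup being measured, mirroring the body above and assuming an in-process `repo`: build the changelog, then resolve a node back to its revision number through the index:

    cl = repo.unfiltered().changelog
    node = cl.node(len(cl) - 1)  # tip
    rev = cl.rev(node)           # node -> rev, the part being benchmarked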
1486
1486
1487
1487
1488 @command(
1488 @command(
1489 b'perfnodemap',
1489 b'perfnodemap',
1490 [
1490 [
1491 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1491 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1492 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1492 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1493 ]
1493 ]
1494 + formatteropts,
1494 + formatteropts,
1495 )
1495 )
1496 def perfnodemap(ui, repo, **opts):
1496 def perfnodemap(ui, repo, **opts):
1497 """benchmark the time necessary to look up revision from a cold nodemap
1497 """benchmark the time necessary to look up revision from a cold nodemap
1498
1498
1499 Depending on the implementation, the number and order of revisions we look
1499 Depending on the implementation, the number and order of revisions we look
1500 up can vary. Examples of useful sets to test:
1500 up can vary. Examples of useful sets to test:
1501 * tip
1501 * tip
1502 * 0
1502 * 0
1503 * -10:
1503 * -10:
1504 * :10
1504 * :10
1505 * -10: + :10
1505 * -10: + :10
1506 * :10: + -10:
1506 * :10: + -10:
1507 * -10000:
1507 * -10000:
1508 * -10000: + 0
1508 * -10000: + 0
1509
1509
1510 The command currently focuses on valid binary lookup. Benchmarking for
1510 The command currently focuses on valid binary lookup. Benchmarking for
1511 hexlookup, prefix lookup and missing lookup would also be valuable.
1511 hexlookup, prefix lookup and missing lookup would also be valuable.
1512 """
1512 """
1513 import mercurial.revlog
1513 import mercurial.revlog
1514
1514
1515 opts = _byteskwargs(opts)
1515 opts = _byteskwargs(opts)
1516 timer, fm = gettimer(ui, opts)
1516 timer, fm = gettimer(ui, opts)
1517 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1517 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1518
1518
1519 unfi = repo.unfiltered()
1519 unfi = repo.unfiltered()
1520 clearcaches = opts['clear_caches']
1520 clearcaches = opts['clear_caches']
1521 # find the filecache func directly
1521 # find the filecache func directly
1522 # This avoid polluting the benchmark with the filecache logic
1522 # This avoid polluting the benchmark with the filecache logic
1523 makecl = unfi.__class__.changelog.func
1523 makecl = unfi.__class__.changelog.func
1524 if not opts[b'rev']:
1524 if not opts[b'rev']:
1525 raise error.Abort('use --rev to specify revisions to look up')
1525 raise error.Abort('use --rev to specify revisions to look up')
1526 revs = scmutil.revrange(repo, opts[b'rev'])
1526 revs = scmutil.revrange(repo, opts[b'rev'])
1527 cl = repo.changelog
1527 cl = repo.changelog
1528 nodes = [cl.node(r) for r in revs]
1528 nodes = [cl.node(r) for r in revs]
1529
1529
1530 # use a list to pass a reference to a nodemap from one closure to the next
1530 # use a list to pass a reference to a nodemap from one closure to the next
1531 nodeget = [None]
1531 nodeget = [None]
1532
1532
1533 def setnodeget():
1533 def setnodeget():
1534 # probably not necessary, but for good measure
1534 # probably not necessary, but for good measure
1535 clearchangelog(unfi)
1535 clearchangelog(unfi)
1536 nodeget[0] = makecl(unfi).nodemap.get
1536 nodeget[0] = makecl(unfi).nodemap.get
1537
1537
1538 def d():
1538 def d():
1539 get = nodeget[0]
1539 get = nodeget[0]
1540 for n in nodes:
1540 for n in nodes:
1541 get(n)
1541 get(n)
1542
1542
1543 setup = None
1543 setup = None
1544 if clearcaches:
1544 if clearcaches:
1545
1545
1546 def setup():
1546 def setup():
1547 setnodeget()
1547 setnodeget()
1548
1548
1549 else:
1549 else:
1550 setnodeget()
1550 setnodeget()
1551 d() # prewarm the data structure
1551 d() # prewarm the data structure
1552 timer(d, setup=setup)
1552 timer(d, setup=setup)
1553 fm.end()
1553 fm.end()
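The benchmark goes through `nodemap.get` rather than `cl.rev` so that, among other things, unknown nodes yield None instead of raising. A minimal sketch, assuming an in-process `repo`:

    cl = repo.unfiltered().changelog
    get = cl.nodemap.get
    rev = get(cl.node(0))  # binary node -> rev; None if the node is unknown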
1554
1554
1555
1555
1556 @command(b'perfstartup', formatteropts)
1556 @command(b'perfstartup', formatteropts)
1557 def perfstartup(ui, repo, **opts):
1557 def perfstartup(ui, repo, **opts):
1558 opts = _byteskwargs(opts)
1558 opts = _byteskwargs(opts)
1559 timer, fm = gettimer(ui, opts)
1559 timer, fm = gettimer(ui, opts)
1560
1560
1561 def d():
1561 def d():
1562 if os.name != r'nt':
1562 if os.name != r'nt':
1563 os.system(
1563 os.system(
1564 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1564 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1565 )
1565 )
1566 else:
1566 else:
1567 os.environ[r'HGRCPATH'] = r' '
1567 os.environ[r'HGRCPATH'] = r' '
1568 os.system(r"%s version -q > NUL" % sys.argv[0])
1568 os.system(r"%s version -q > NUL" % sys.argv[0])
1569
1569
1570 timer(d)
1570 timer(d)
1571 fm.end()
1571 fm.end()
1572
1572
1573
1573
1574 @command(b'perfparents', formatteropts)
1574 @command(b'perfparents', formatteropts)
1575 def perfparents(ui, repo, **opts):
1575 def perfparents(ui, repo, **opts):
1576 """benchmark the time necessary to fetch one changeset's parents.
1576 """benchmark the time necessary to fetch one changeset's parents.
1577
1577
1578 The fetch is done using the `node identifier`, traversing all object layers
1578 The fetch is done using the `node identifier`, traversing all object layers
1579 from the repository object. The first N revisions will be used for this
1579 from the repository object. The first N revisions will be used for this
1580 benchmark. N is controlled by the ``perf.parentscount`` config option
1580 benchmark. N is controlled by the ``perf.parentscount`` config option
1581 (default: 1000).
1581 (default: 1000).
1582 """
1582 """
1583 opts = _byteskwargs(opts)
1583 opts = _byteskwargs(opts)
1584 timer, fm = gettimer(ui, opts)
1584 timer, fm = gettimer(ui, opts)
1585 # control the number of commits perfparents iterates over
1585 # control the number of commits perfparents iterates over
1586 # experimental config: perf.parentscount
1586 # experimental config: perf.parentscount
1587 count = getint(ui, b"perf", b"parentscount", 1000)
1587 count = getint(ui, b"perf", b"parentscount", 1000)
1588 if len(repo.changelog) < count:
1588 if len(repo.changelog) < count:
1589 raise error.Abort(b"repo needs %d commits for this test" % count)
1589 raise error.Abort(b"repo needs %d commits for this test" % count)
1590 repo = repo.unfiltered()
1590 repo = repo.unfiltered()
1591 nl = [repo.changelog.node(i) for i in _xrange(count)]
1591 nl = [repo.changelog.node(i) for i in _xrange(count)]
1592
1592
1593 def d():
1593 def d():
1594 for n in nl:
1594 for n in nl:
1595 repo.changelog.parents(n)
1595 repo.changelog.parents(n)
1596
1596
1597 timer(d)
1597 timer(d)
1598 fm.end()
1598 fm.end()
1599
1599
1600
1600
1601 @command(b'perfctxfiles', formatteropts)
1601 @command(b'perfctxfiles', formatteropts)
1602 def perfctxfiles(ui, repo, x, **opts):
1602 def perfctxfiles(ui, repo, x, **opts):
1603 opts = _byteskwargs(opts)
1603 opts = _byteskwargs(opts)
1604 x = int(x)
1604 x = int(x)
1605 timer, fm = gettimer(ui, opts)
1605 timer, fm = gettimer(ui, opts)
1606
1606
1607 def d():
1607 def d():
1608 len(repo[x].files())
1608 len(repo[x].files())
1609
1609
1610 timer(d)
1610 timer(d)
1611 fm.end()
1611 fm.end()
1612
1612
1613
1613
1614 @command(b'perfrawfiles', formatteropts)
1614 @command(b'perfrawfiles', formatteropts)
1615 def perfrawfiles(ui, repo, x, **opts):
1615 def perfrawfiles(ui, repo, x, **opts):
1616 opts = _byteskwargs(opts)
1616 opts = _byteskwargs(opts)
1617 x = int(x)
1617 x = int(x)
1618 timer, fm = gettimer(ui, opts)
1618 timer, fm = gettimer(ui, opts)
1619 cl = repo.changelog
1619 cl = repo.changelog
1620
1620
1621 def d():
1621 def d():
1622 len(cl.read(x)[3])
1622 len(cl.read(x)[3])
1623
1623
1624 timer(d)
1624 timer(d)
1625 fm.end()
1625 fm.end()
1626
1626
1627
1627
1628 @command(b'perflookup', formatteropts)
1628 @command(b'perflookup', formatteropts)
1629 def perflookup(ui, repo, rev, **opts):
1629 def perflookup(ui, repo, rev, **opts):
1630 opts = _byteskwargs(opts)
1630 opts = _byteskwargs(opts)
1631 timer, fm = gettimer(ui, opts)
1631 timer, fm = gettimer(ui, opts)
1632 timer(lambda: len(repo.lookup(rev)))
1632 timer(lambda: len(repo.lookup(rev)))
1633 fm.end()
1633 fm.end()
1634
1634
1635
1635
1636 @command(
1636 @command(
1637 b'perflinelogedits',
1637 b'perflinelogedits',
1638 [
1638 [
1639 (b'n', b'edits', 10000, b'number of edits'),
1639 (b'n', b'edits', 10000, b'number of edits'),
1640 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1640 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1641 ],
1641 ],
1642 norepo=True,
1642 norepo=True,
1643 )
1643 )
1644 def perflinelogedits(ui, **opts):
1644 def perflinelogedits(ui, **opts):
1645 from mercurial import linelog
1645 from mercurial import linelog
1646
1646
1647 opts = _byteskwargs(opts)
1647 opts = _byteskwargs(opts)
1648
1648
1649 edits = opts[b'edits']
1649 edits = opts[b'edits']
1650 maxhunklines = opts[b'max_hunk_lines']
1650 maxhunklines = opts[b'max_hunk_lines']
1651
1651
1652 maxb1 = 100000
1652 maxb1 = 100000
1653 random.seed(0)
1653 random.seed(0)
1654 randint = random.randint
1654 randint = random.randint
1655 currentlines = 0
1655 currentlines = 0
1656 arglist = []
1656 arglist = []
1657 for rev in _xrange(edits):
1657 for rev in _xrange(edits):
1658 a1 = randint(0, currentlines)
1658 a1 = randint(0, currentlines)
1659 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1659 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1660 b1 = randint(0, maxb1)
1660 b1 = randint(0, maxb1)
1661 b2 = randint(b1, b1 + maxhunklines)
1661 b2 = randint(b1, b1 + maxhunklines)
1662 currentlines += (b2 - b1) - (a2 - a1)
1662 currentlines += (b2 - b1) - (a2 - a1)
1663 arglist.append((rev, a1, a2, b1, b2))
1663 arglist.append((rev, a1, a2, b1, b2))
1664
1664
1665 def d():
1665 def d():
1666 ll = linelog.linelog()
1666 ll = linelog.linelog()
1667 for args in arglist:
1667 for args in arglist:
1668 ll.replacelines(*args)
1668 ll.replacelines(*args)
1669
1669
1670 timer, fm = gettimer(ui, opts)
1670 timer, fm = gettimer(ui, opts)
1671 timer(d)
1671 timer(d)
1672 fm.end()
1672 fm.end()
1673
1673
1674
1674
1675 @command(b'perfrevrange', formatteropts)
1675 @command(b'perfrevrange', formatteropts)
1676 def perfrevrange(ui, repo, *specs, **opts):
1676 def perfrevrange(ui, repo, *specs, **opts):
1677 opts = _byteskwargs(opts)
1677 opts = _byteskwargs(opts)
1678 timer, fm = gettimer(ui, opts)
1678 timer, fm = gettimer(ui, opts)
1679 revrange = scmutil.revrange
1679 revrange = scmutil.revrange
1680 timer(lambda: len(revrange(repo, specs)))
1680 timer(lambda: len(revrange(repo, specs)))
1681 fm.end()
1681 fm.end()
1682
1682
1683
1683
1684 @command(b'perfnodelookup', formatteropts)
1684 @command(b'perfnodelookup', formatteropts)
1685 def perfnodelookup(ui, repo, rev, **opts):
1685 def perfnodelookup(ui, repo, rev, **opts):
1686 opts = _byteskwargs(opts)
1686 opts = _byteskwargs(opts)
1687 timer, fm = gettimer(ui, opts)
1687 timer, fm = gettimer(ui, opts)
1688 import mercurial.revlog
1688 import mercurial.revlog
1689
1689
1690 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1690 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1691 n = scmutil.revsingle(repo, rev).node()
1691 n = scmutil.revsingle(repo, rev).node()
1692 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1692 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1693
1693
1694 def d():
1694 def d():
1695 cl.rev(n)
1695 cl.rev(n)
1696 clearcaches(cl)
1696 clearcaches(cl)
1697
1697
1698 timer(d)
1698 timer(d)
1699 fm.end()
1699 fm.end()
1700
1700
1701
1701
1702 @command(
1702 @command(
1703 b'perflog',
1703 b'perflog',
1704 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1704 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1705 )
1705 )
1706 def perflog(ui, repo, rev=None, **opts):
1706 def perflog(ui, repo, rev=None, **opts):
1707 opts = _byteskwargs(opts)
1707 opts = _byteskwargs(opts)
1708 if rev is None:
1708 if rev is None:
1709 rev = []
1709 rev = []
1710 timer, fm = gettimer(ui, opts)
1710 timer, fm = gettimer(ui, opts)
1711 ui.pushbuffer()
1711 ui.pushbuffer()
1712 timer(
1712 timer(
1713 lambda: commands.log(
1713 lambda: commands.log(
1714 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1714 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1715 )
1715 )
1716 )
1716 )
1717 ui.popbuffer()
1717 ui.popbuffer()
1718 fm.end()
1718 fm.end()
1719
1719
1720
1720
1721 @command(b'perfmoonwalk', formatteropts)
1721 @command(b'perfmoonwalk', formatteropts)
1722 def perfmoonwalk(ui, repo, **opts):
1722 def perfmoonwalk(ui, repo, **opts):
1723 """benchmark walking the changelog backwards
1723 """benchmark walking the changelog backwards
1724
1724
1725 This also loads the changelog data for each revision in the changelog.
1725 This also loads the changelog data for each revision in the changelog.
1726 """
1726 """
1727 opts = _byteskwargs(opts)
1727 opts = _byteskwargs(opts)
1728 timer, fm = gettimer(ui, opts)
1728 timer, fm = gettimer(ui, opts)
1729
1729
1730 def moonwalk():
1730 def moonwalk():
1731 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1731 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1732 ctx = repo[i]
1732 ctx = repo[i]
1733 ctx.branch() # read changelog data (in addition to the index)
1733 ctx.branch() # read changelog data (in addition to the index)
1734
1734
1735 timer(moonwalk)
1735 timer(moonwalk)
1736 fm.end()
1736 fm.end()
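A minimal sketch of the walk, assuming an in-process `repo`: iterate revisions from tip down to 0, forcing a changelog data read (not just an index hit) for each:

    cl = repo.changelog
    for i in cl.revs(start=len(repo) - 1, stop=-1):
        repo[i].branch()  # reads the changelog entry, not only the index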
1737
1737
1738
1738
1739 @command(
1739 @command(
1740 b'perftemplating',
1740 b'perftemplating',
1741 [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
1741 [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
1742 )
1742 )
1743 def perftemplating(ui, repo, testedtemplate=None, **opts):
1743 def perftemplating(ui, repo, testedtemplate=None, **opts):
1744 """test the rendering time of a given template"""
1744 """test the rendering time of a given template"""
1745 if makelogtemplater is None:
1745 if makelogtemplater is None:
1746 raise error.Abort(
1746 raise error.Abort(
1747 b"perftemplating not available with this Mercurial",
1747 b"perftemplating not available with this Mercurial",
1748 hint=b"use 4.3 or later",
1748 hint=b"use 4.3 or later",
1749 )
1749 )
1750
1750
1751 opts = _byteskwargs(opts)
1751 opts = _byteskwargs(opts)
1752
1752
1753 nullui = ui.copy()
1753 nullui = ui.copy()
1754 nullui.fout = open(os.devnull, r'wb')
1754 nullui.fout = open(os.devnull, r'wb')
1755 nullui.disablepager()
1755 nullui.disablepager()
1756 revs = opts.get(b'rev')
1756 revs = opts.get(b'rev')
1757 if not revs:
1757 if not revs:
1758 revs = [b'all()']
1758 revs = [b'all()']
1759 revs = list(scmutil.revrange(repo, revs))
1759 revs = list(scmutil.revrange(repo, revs))
1760
1760
1761 defaulttemplate = (
1761 defaulttemplate = (
1762 b'{date|shortdate} [{rev}:{node|short}]'
1762 b'{date|shortdate} [{rev}:{node|short}]'
1763 b' {author|person}: {desc|firstline}\n'
1763 b' {author|person}: {desc|firstline}\n'
1764 )
1764 )
1765 if testedtemplate is None:
1765 if testedtemplate is None:
1766 testedtemplate = defaulttemplate
1766 testedtemplate = defaulttemplate
1767 displayer = makelogtemplater(nullui, repo, testedtemplate)
1767 displayer = makelogtemplater(nullui, repo, testedtemplate)
1768
1768
1769 def format():
1769 def format():
1770 for r in revs:
1770 for r in revs:
1771 ctx = repo[r]
1771 ctx = repo[r]
1772 displayer.show(ctx)
1772 displayer.show(ctx)
1773 displayer.flush(ctx)
1773 displayer.flush(ctx)
1774
1774
1775 timer, fm = gettimer(ui, opts)
1775 timer, fm = gettimer(ui, opts)
1776 timer(format)
1776 timer(format)
1777 fm.end()
1777 fm.end()
1778
1778
1779
1779
1780 def _displaystats(ui, opts, entries, data):
1780 def _displaystats(ui, opts, entries, data):
1782 # use a second formatter because the data are quite different, not sure
1782 # use a second formatter because the data are quite different, not sure
1783 # how it flies with the templater.
1783 # how it flies with the templater.
1784 fm = ui.formatter(b'perf-stats', opts)
1784 fm = ui.formatter(b'perf-stats', opts)
1785 for key, title in entries:
1785 for key, title in entries:
1786 values = data[key]
1786 values = data[key]
1787 nbvalues = len(values)  # number of samples for this statistic
1787 nbvalues = len(values)  # number of samples for this statistic
1788 values.sort()
1788 values.sort()
1789 stats = {
1789 stats = {
1790 'key': key,
1790 'key': key,
1791 'title': title,
1791 'title': title,
1792 'nbitems': len(values),
1792 'nbitems': len(values),
1793 'min': values[0][0],
1793 'min': values[0][0],
1794 '10%': values[(nbvalues * 10) // 100][0],
1794 '10%': values[(nbvalues * 10) // 100][0],
1795 '25%': values[(nbvalues * 25) // 100][0],
1795 '25%': values[(nbvalues * 25) // 100][0],
1796 '50%': values[(nbvalues * 50) // 100][0],
1796 '50%': values[(nbvalues * 50) // 100][0],
1797 '75%': values[(nbvalues * 75) // 100][0],
1797 '75%': values[(nbvalues * 75) // 100][0],
1798 '80%': values[(nbvalues * 80) // 100][0],
1798 '80%': values[(nbvalues * 80) // 100][0],
1799 '85%': values[(nbvalues * 85) // 100][0],
1799 '85%': values[(nbvalues * 85) // 100][0],
1800 '90%': values[(nbvalues * 90) // 100][0],
1800 '90%': values[(nbvalues * 90) // 100][0],
1801 '95%': values[(nbvalues * 95) // 100][0],
1801 '95%': values[(nbvalues * 95) // 100][0],
1802 '99%': values[(nbvalues * 99) // 100][0],
1802 '99%': values[(nbvalues * 99) // 100][0],
1803 'max': values[-1][0],
1803 'max': values[-1][0],
1804 }
1804 }
1805 fm.startitem()
1805 fm.startitem()
1806 fm.data(**stats)
1806 fm.data(**stats)
1807 # make node pretty for the human output
1807 # make node pretty for the human output
1808 fm.plain('### %s (%d items)\n' % (title, len(values)))
1808 fm.plain('### %s (%d items)\n' % (title, len(values)))
1809 lines = [
1809 lines = [
1810 'min',
1810 'min',
1811 '10%',
1811 '10%',
1812 '25%',
1812 '25%',
1813 '50%',
1813 '50%',
1814 '75%',
1814 '75%',
1815 '80%',
1815 '80%',
1816 '85%',
1816 '85%',
1817 '90%',
1817 '90%',
1818 '95%',
1818 '95%',
1819 '99%',
1819 '99%',
1820 'max',
1820 'max',
1821 ]
1821 ]
1822 for l in lines:
1822 for l in lines:
1823 fm.plain('%s: %s\n' % (l, stats[l]))
1823 fm.plain('%s: %s\n' % (l, stats[l]))
1824 fm.end()
1824 fm.end()
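The percentile rows are produced by integer-truncated indexing into the sorted sample list; a quick worked example, assuming a hypothetical list of 200 collected samples:

    values = sorted(samples)            # 200 (value, ...) tuples
    idx90 = (len(values) * 90) // 100   # (200 * 90) // 100 == 180
    p90 = values[idx90][0]              # the 181st smallest sample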
1825
1825
1826
1826
1827 @command(
1827 @command(
1828 b'perfhelper-mergecopies',
1828 b'perfhelper-mergecopies',
1829 formatteropts
1829 formatteropts
1830 + [
1830 + [
1831 (b'r', b'revs', [], b'restrict search to these revisions'),
1831 (b'r', b'revs', [], b'restrict search to these revisions'),
1832 (b'', b'timing', False, b'provides extra data (costly)'),
1832 (b'', b'timing', False, b'provides extra data (costly)'),
1833 (b'', b'stats', False, b'provides statistic about the measured data'),
1833 (b'', b'stats', False, b'provides statistic about the measured data'),
1834 ],
1834 ],
1835 )
1835 )
1836 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1836 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1837 """find statistics about potential parameters for `perfmergecopies`
1837 """find statistics about potential parameters for `perfmergecopies`
1838
1838
1839 This command finds (base, p1, p2) triplets relevant for copytracing
1839 This command finds (base, p1, p2) triplets relevant for copytracing
1840 benchmarking in the context of a merge. It reports values for some of the
1840 benchmarking in the context of a merge. It reports values for some of the
1841 parameters that impact merge copy tracing time during merge.
1841 parameters that impact merge copy tracing time during merge.
1842
1842
1843 If `--timing` is set, rename detection is run and the associated timing
1843 If `--timing` is set, rename detection is run and the associated timing
1844 will be reported. The extra details come at the cost of slower command
1844 will be reported. The extra details come at the cost of slower command
1845 execution.
1845 execution.
1846
1846
1847 Since rename detection is only run once, other factors might easily
1847 Since rename detection is only run once, other factors might easily
1848 affect the precision of the timing. However it should give a good
1848 affect the precision of the timing. However it should give a good
1849 approximation of which revision triplets are very costly.
1849 approximation of which revision triplets are very costly.
1850 """
1850 """
1851 opts = _byteskwargs(opts)
1851 opts = _byteskwargs(opts)
1852 fm = ui.formatter(b'perf', opts)
1852 fm = ui.formatter(b'perf', opts)
1853 dotiming = opts[b'timing']
1853 dotiming = opts[b'timing']
1854 dostats = opts[b'stats']
1854 dostats = opts[b'stats']
1855
1855
1856 output_template = [
1856 output_template = [
1857 ("base", "%(base)12s"),
1857 ("base", "%(base)12s"),
1858 ("p1", "%(p1.node)12s"),
1858 ("p1", "%(p1.node)12s"),
1859 ("p2", "%(p2.node)12s"),
1859 ("p2", "%(p2.node)12s"),
1860 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1860 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1861 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1861 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1862 ("p1.renames", "%(p1.renamedfiles)12d"),
1862 ("p1.renames", "%(p1.renamedfiles)12d"),
1863 ("p1.time", "%(p1.time)12.3f"),
1863 ("p1.time", "%(p1.time)12.3f"),
1864 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1864 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1865 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1865 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1866 ("p2.renames", "%(p2.renamedfiles)12d"),
1866 ("p2.renames", "%(p2.renamedfiles)12d"),
1867 ("p2.time", "%(p2.time)12.3f"),
1867 ("p2.time", "%(p2.time)12.3f"),
1868 ("renames", "%(nbrenamedfiles)12d"),
1868 ("renames", "%(nbrenamedfiles)12d"),
1869 ("total.time", "%(time)12.3f"),
1869 ("total.time", "%(time)12.3f"),
1870 ]
1870 ]
1871 if not dotiming:
1871 if not dotiming:
1872 output_template = [
1872 output_template = [
1873 i
1873 i
1874 for i in output_template
1874 for i in output_template
1875 if not ('time' in i[0] or 'renames' in i[0])
1875 if not ('time' in i[0] or 'renames' in i[0])
1876 ]
1876 ]
1877 header_names = [h for (h, v) in output_template]
1877 header_names = [h for (h, v) in output_template]
1878 output = ' '.join([v for (h, v) in output_template]) + '\n'
1878 output = ' '.join([v for (h, v) in output_template]) + '\n'
1879 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1879 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1880 fm.plain(header % tuple(header_names))
1880 fm.plain(header % tuple(header_names))
1881
1881
1882 if not revs:
1882 if not revs:
1883 revs = ['all()']
1883 revs = ['all()']
1884 revs = scmutil.revrange(repo, revs)
1884 revs = scmutil.revrange(repo, revs)
1885
1885
1886 if dostats:
1886 if dostats:
1887 alldata = {
1887 alldata = {
1888 'nbrevs': [],
1888 'nbrevs': [],
1889 'nbmissingfiles': [],
1889 'nbmissingfiles': [],
1890 }
1890 }
1891 if dotiming:
1891 if dotiming:
1892 alldata['parentnbrenames'] = []
1892 alldata['parentnbrenames'] = []
1893 alldata['totalnbrenames'] = []
1893 alldata['totalnbrenames'] = []
1894 alldata['parenttime'] = []
1894 alldata['parenttime'] = []
1895 alldata['totaltime'] = []
1895 alldata['totaltime'] = []
1896
1896
1897 roi = repo.revs('merge() and %ld', revs)
1897 roi = repo.revs('merge() and %ld', revs)
1898 for r in roi:
1898 for r in roi:
1899 ctx = repo[r]
1899 ctx = repo[r]
1900 p1 = ctx.p1()
1900 p1 = ctx.p1()
1901 p2 = ctx.p2()
1901 p2 = ctx.p2()
1902 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1902 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1903 for b in bases:
1903 for b in bases:
1904 b = repo[b]
1904 b = repo[b]
1905 p1missing = copies._computeforwardmissing(b, p1)
1905 p1missing = copies._computeforwardmissing(b, p1)
1906 p2missing = copies._computeforwardmissing(b, p2)
1906 p2missing = copies._computeforwardmissing(b, p2)
1907 data = {
1907 data = {
1908 b'base': b.hex(),
1908 b'base': b.hex(),
1909 b'p1.node': p1.hex(),
1909 b'p1.node': p1.hex(),
1910 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1910 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1911 b'p1.nbmissingfiles': len(p1missing),
1911 b'p1.nbmissingfiles': len(p1missing),
1912 b'p2.node': p2.hex(),
1912 b'p2.node': p2.hex(),
1913 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1913 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1914 b'p2.nbmissingfiles': len(p2missing),
1914 b'p2.nbmissingfiles': len(p2missing),
1915 }
1915 }
1916 if dostats:
1916 if dostats:
1917 if p1missing:
1917 if p1missing:
1918 alldata['nbrevs'].append(
1918 alldata['nbrevs'].append(
1919 (data['p1.nbrevs'], b.hex(), p1.hex())
1919 (data['p1.nbrevs'], b.hex(), p1.hex())
1920 )
1920 )
1921 alldata['nbmissingfiles'].append(
1921 alldata['nbmissingfiles'].append(
1922 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
1922 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
1923 )
1923 )
1924 if p2missing:
1924 if p2missing:
1925 alldata['nbrevs'].append(
1925 alldata['nbrevs'].append(
1926 (data['p2.nbrevs'], b.hex(), p2.hex())
1926 (data['p2.nbrevs'], b.hex(), p2.hex())
1927 )
1927 )
1928 alldata['nbmissingfiles'].append(
1928 alldata['nbmissingfiles'].append(
1929 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
1929 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
1930 )
1930 )
1931 if dotiming:
1931 if dotiming:
1932 begin = util.timer()
1932 begin = util.timer()
1933 mergedata = copies.mergecopies(repo, p1, p2, b)
1933 mergedata = copies.mergecopies(repo, p1, p2, b)
1934 end = util.timer()
1934 end = util.timer()
1935 # not very stable timing since we did only one run
1935 # not very stable timing since we did only one run
1936 data['time'] = end - begin
1936 data['time'] = end - begin
1937 # mergedata contains five dicts: "copy", "movewithdir",
1937 # mergedata contains five dicts: "copy", "movewithdir",
1938 # "diverge", "renamedelete" and "dirmove".
1938 # "diverge", "renamedelete" and "dirmove".
1939 # The first four are about renamed files, so let's count those.
1939 # The first four are about renamed files, so let's count those.
1940 renames = len(mergedata[0])
1940 renames = len(mergedata[0])
1941 renames += len(mergedata[1])
1941 renames += len(mergedata[1])
1942 renames += len(mergedata[2])
1942 renames += len(mergedata[2])
1943 renames += len(mergedata[3])
1943 renames += len(mergedata[3])
1944 data['nbrenamedfiles'] = renames
1944 data['nbrenamedfiles'] = renames
1945 begin = util.timer()
1945 begin = util.timer()
1946 p1renames = copies.pathcopies(b, p1)
1946 p1renames = copies.pathcopies(b, p1)
1947 end = util.timer()
1947 end = util.timer()
1948 data['p1.time'] = end - begin
1948 data['p1.time'] = end - begin
1949 begin = util.timer()
1949 begin = util.timer()
1950 p2renames = copies.pathcopies(b, p2)
1950 p2renames = copies.pathcopies(b, p2)
1951 end = util.timer()
1951 end = util.timer()
1952 data['p2.time'] = end - begin
1952 data['p2.time'] = end - begin
1953 data['p1.renamedfiles'] = len(p1renames)
1953 data['p1.renamedfiles'] = len(p1renames)
1954 data['p2.renamedfiles'] = len(p2renames)
1954 data['p2.renamedfiles'] = len(p2renames)
1955
1955
1956 if dostats:
1956 if dostats:
1957 if p1missing:
1957 if p1missing:
1958 alldata['parentnbrenames'].append(
1958 alldata['parentnbrenames'].append(
1959 (data['p1.renamedfiles'], b.hex(), p1.hex())
1959 (data['p1.renamedfiles'], b.hex(), p1.hex())
1960 )
1960 )
1961 alldata['parenttime'].append(
1961 alldata['parenttime'].append(
1962 (data['p1.time'], b.hex(), p1.hex())
1962 (data['p1.time'], b.hex(), p1.hex())
1963 )
1963 )
1964 if p2missing:
1964 if p2missing:
1965 alldata['parentnbrenames'].append(
1965 alldata['parentnbrenames'].append(
1966 (data['p2.renamedfiles'], b.hex(), p2.hex())
1966 (data['p2.renamedfiles'], b.hex(), p2.hex())
1967 )
1967 )
1968 alldata['parenttime'].append(
1968 alldata['parenttime'].append(
1969 (data['p2.time'], b.hex(), p2.hex())
1969 (data['p2.time'], b.hex(), p2.hex())
1970 )
1970 )
1971 if p1missing or p2missing:
1971 if p1missing or p2missing:
1972 alldata['totalnbrenames'].append(
1972 alldata['totalnbrenames'].append(
1973 (
1973 (
1974 data['nbrenamedfiles'],
1974 data['nbrenamedfiles'],
1975 b.hex(),
1975 b.hex(),
1976 p1.hex(),
1976 p1.hex(),
1977 p2.hex(),
1977 p2.hex(),
1978 )
1978 )
1979 )
1979 )
1980 alldata['totaltime'].append(
1980 alldata['totaltime'].append(
1981 (data['time'], b.hex(), p1.hex(), p2.hex())
1981 (data['time'], b.hex(), p1.hex(), p2.hex())
1982 )
1982 )
1983 fm.startitem()
1983 fm.startitem()
1984 fm.data(**data)
1984 fm.data(**data)
1985 # make node pretty for the human output
1985 # make node pretty for the human output
1986 out = data.copy()
1986 out = data.copy()
1987 out['base'] = fm.hexfunc(b.node())
1987 out['base'] = fm.hexfunc(b.node())
1988 out['p1.node'] = fm.hexfunc(p1.node())
1988 out['p1.node'] = fm.hexfunc(p1.node())
1989 out['p2.node'] = fm.hexfunc(p2.node())
1989 out['p2.node'] = fm.hexfunc(p2.node())
1990 fm.plain(output % out)
1990 fm.plain(output % out)
1991
1991
1992 fm.end()
1992 fm.end()
1993 if dostats:
1993 if dostats:
1994 # use a second formatter because the data are quite different, not sure
1994 # use a second formatter because the data are quite different, not sure
1995 # how it flies with the templater.
1995 # how it flies with the templater.
1996 entries = [
1996 entries = [
1997 ('nbrevs', 'number of revisions covered'),
1997 ('nbrevs', 'number of revisions covered'),
1998 ('nbmissingfiles', 'number of missing files at head'),
1998 ('nbmissingfiles', 'number of missing files at head'),
1999 ]
1999 ]
2000 if dotiming:
2000 if dotiming:
2001 entries.append(
2001 entries.append(
2002 ('parentnbrenames', 'rename from one parent to base')
2002 ('parentnbrenames', 'rename from one parent to base')
2003 )
2003 )
2004 entries.append(('totalnbrenames', 'total number of renames'))
2004 entries.append(('totalnbrenames', 'total number of renames'))
2005 entries.append(('parenttime', 'time for one parent'))
2005 entries.append(('parenttime', 'time for one parent'))
2006 entries.append(('totaltime', 'time for both parents'))
2006 entries.append(('totaltime', 'time for both parents'))
2007 _displaystats(ui, opts, entries, alldata)
2007 _displaystats(ui, opts, entries, alldata)
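Per the comment in the timing branch above, `copies.mergecopies` returns five dicts; a hedged sketch of unpacking them and reproducing the rename count used by this command:

    copy, movewithdir, diverge, renamedelete, dirmove = copies.mergecopies(
        repo, p1, p2, base
    )
    nbrenames = len(copy) + len(movewithdir) + len(diverge) + len(renamedelete)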
2008
2008
2009
2009
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However, it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not
        # sure how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)


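# A hypothetical invocation of the command above (flag names from its option
# table; the revset is only an example):
#
#   $ hg perfhelper-pathcopies --revs 'merge()' --timing --stats
#
# With --timing each row gains a rename count and a single-run wall-clock
# time; with --stats a summary of the measured columns is printed at the end.
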
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()


@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        s.fncache._load()

    timer(d)
    fm.end()


@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()


@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()


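# The three fncache benchmarks above are usually run back-to-back to separate
# parse, write, and path-encoding costs. A sketch (assuming a store-format
# repository that actually has an fncache):
#
#   $ hg perffncacheload      # time fncache._load()
#   $ hg perffncachewrite     # time rewriting the fncache in a transaction
#   $ hg perffncacheencode    # time s.encode() over every tracked path
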
def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()


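# Worker protocol notes for _bdiffworker above: a worker drains text pairs
# from `q` until it sees a None sentinel, acknowledges the sentinel with
# task_done() so the caller's q.join() can return, then parks on the `ready`
# condition until the next timing round (or until `done` is set). A minimal
# driver sketch, mirroring the setup/teardown in perfbdiff below:
#
#   q = queue()
#   ready, done = threading.Condition(), threading.Event()
#   threading.Thread(
#       target=_bdiffworker, args=(q, False, False, ready, done)
#   ).start()
#   q.put((b'a\n', b'b\n'))
#   q.put(None)
#   q.join()                  # returns once the pair is diffed
#   done.set()
#   q.put(None)               # unblocks a worker still stuck in q.get()
#   with ready:
#       ready.notify_all()    # releases workers parked on the condition
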
def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)


@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between the given revision and its delta
    parent.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()


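# Hypothetical invocations of perfbdiff (revision numbers are examples only):
#
#   $ hg perfbdiff -c 1000                  # changelog rev vs its delta parent
#   $ hg perfbdiff -m 1000 --count 100      # 100 manifest revs from rev 1000
#   $ hg perfbdiff -c 1000 --alldata --threads 4
#
# Note that --xdiff is only valid together with --blocks, as enforced at the
# top of the function.
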
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between the given revision and its delta
    parent.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


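# perfunidiff takes the same selection arguments as perfbdiff (without the
# threads/blocks/xdiff knobs), e.g., hypothetically:
#
#   $ hg perfunidiff -c 1000 --count 100
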
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()


@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


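# Background for the header decoding in perfrevlogindex above: the first four
# bytes of a revlog index are a big-endian word whose low 16 bits carry the
# format version and whose high 16 bits carry feature flags, bit 16 being the
# inline-data flag. Hence `version = header & 0xFFFF` and
# `inline = header & (1 << 16)`; an inline version-1 index would start with
# struct.pack(b'>I', (1 << 16) | 1).
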
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


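# A hypothetical run of perfrevlogrevisions reading every 100th manifest
# revision, then the same walk in reverse:
#
#   $ hg perfrevlogrevisions -m --dist 100
#   $ hg perfrevlogrevisions -m --dist 100 --reverse
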
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revision tested'),
        (b'', b'source', b'full', b'the kind of data fed into the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This command measures performance in a custom way. As a result some
    of the global configuration of the 'perf' command does not apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled, use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sort results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()


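# On the percentile table in perfrevlogwrite above: once `results` is sorted
# by median per-revision time, each named quantile is a plain index into the
# sorted list. For example, with resultcount == 2000 the "90%" row reads
# results[1800] and "max" reads results[-1]; for small runs several quantiles
# can collapse onto index 0.
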
class _faketr(object):
    def add(s, x, y, z=None):
        return None


def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings


def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )


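# For illustration, the (text, cachedelta) seeds produced by the source modes
# above, for a revision r with parents p1/p2:
#
#   full            -> fulltext of r, no delta
#   parent-1        -> no text, delta against p1
#   parent-2        -> no text, delta against p2 (p1 if r has no p2)
#   parent-smallest -> no text, the shorter of the p1/p2 deltas
#   storage         -> no text, delta against the stored delta parent
#
# addrawrevision() can rebuild the fulltext from cachedelta when needed, so
# only the `full` mode pays for fulltext extraction up front.
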
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('instantiating revlog from the truncated copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


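# Truncation math in _temprevlog above, for illustration: the copy keeps
# exactly `truncaterev` revisions. Assuming the usual version-1 index
# (orig._io.size == 64 bytes per entry), truncaterev == 1000 cuts the index
# to 64000 bytes and the data file to orig.start(1000), the byte offset at
# which revision 1000's chunk would begin.
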
2907 @command(
2907 @command(
2908 b'perfrevlogchunks',
2908 b'perfrevlogchunks',
2909 revlogopts
2909 revlogopts
2910 + formatteropts
2910 + formatteropts
2911 + [
2911 + [
2912 (b'e', b'engines', b'', b'compression engines to use'),
2912 (b'e', b'engines', b'', b'compression engines to use'),
2913 (b's', b'startrev', 0, b'revision to start at'),
2913 (b's', b'startrev', 0, b'revision to start at'),
2914 ],
2914 ],
2915 b'-c|-m|FILE',
2915 b'-c|-m|FILE',
2916 )
2916 )
2917 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2917 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2918 """Benchmark operations on revlog chunks.
2918 """Benchmark operations on revlog chunks.
2919
2919
2920 Logically, each revlog is a collection of fulltext revisions. However,
2920 Logically, each revlog is a collection of fulltext revisions. However,
2921 stored within each revlog are "chunks" of possibly compressed data. This
2921 stored within each revlog are "chunks" of possibly compressed data. This
2922 data needs to be read and decompressed or compressed and written.
2922 data needs to be read and decompressed or compressed and written.
2923
2923
2924 This command measures the time it takes to read+decompress and recompress
2924 This command measures the time it takes to read+decompress and recompress
2925 chunks in a revlog. It effectively isolates I/O and compression performance.
2925 chunks in a revlog. It effectively isolates I/O and compression performance.
2926 For measurements of higher-level operations like resolving revisions,
2926 For measurements of higher-level operations like resolving revisions,
2927 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2927 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2928 """
2928 """
2929 opts = _byteskwargs(opts)
2929 opts = _byteskwargs(opts)
2930
2930
2931 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2931 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2932
2932
2933 # _chunkraw was renamed to _getsegmentforrevs.
2933 # _chunkraw was renamed to _getsegmentforrevs.
2934 try:
2934 try:
2935 segmentforrevs = rl._getsegmentforrevs
2935 segmentforrevs = rl._getsegmentforrevs
2936 except AttributeError:
2936 except AttributeError:
2937 segmentforrevs = rl._chunkraw
2937 segmentforrevs = rl._chunkraw
2938
2938
2939 # Verify engines argument.
2939 # Verify engines argument.
2940 if engines:
2940 if engines:
2941 engines = set(e.strip() for e in engines.split(b','))
2941 engines = set(e.strip() for e in engines.split(b','))
2942 for engine in engines:
2942 for engine in engines:
2943 try:
2943 try:
2944 util.compressionengines[engine]
2944 util.compressionengines[engine]
2945 except KeyError:
2945 except KeyError:
2946 raise error.Abort(b'unknown compression engine: %s' % engine)
2946 raise error.Abort(b'unknown compression engine: %s' % engine)
2947 else:
2947 else:
2948 engines = []
2948 engines = []
2949 for e in util.compengines:
2949 for e in util.compengines:
2950 engine = util.compengines[e]
2950 engine = util.compengines[e]
2951 try:
2951 try:
2952 if engine.available():
2952 if engine.available():
2953 engine.revlogcompressor().compress(b'dummy')
2953 engine.revlogcompressor().compress(b'dummy')
2954 engines.append(e)
2954 engines.append(e)
2955 except NotImplementedError:
2955 except NotImplementedError:
2956 pass
2956 pass
2957
2957
2958 revs = list(rl.revs(startrev, len(rl) - 1))
2958 revs = list(rl.revs(startrev, len(rl) - 1))
2959
2959
2960 def rlfh(rl):
2960 def rlfh(rl):
2961 if rl._inline:
2961 if rl._inline:
2962 return getsvfs(repo)(rl.indexfile)
2962 return getsvfs(repo)(rl.indexfile)
2963 else:
2963 else:
2964 return getsvfs(repo)(rl.datafile)
2964 return getsvfs(repo)(rl.datafile)
2965
2965
2966 def doread():
2966 def doread():
2967 rl.clearcaches()
2967 rl.clearcaches()
2968 for rev in revs:
2968 for rev in revs:
2969 segmentforrevs(rev, rev)
2969 segmentforrevs(rev, rev)
2970
2970
2971 def doreadcachedfh():
2971 def doreadcachedfh():
2972 rl.clearcaches()
2972 rl.clearcaches()
2973 fh = rlfh(rl)
2973 fh = rlfh(rl)
2974 for rev in revs:
2974 for rev in revs:
2975 segmentforrevs(rev, rev, df=fh)
2975 segmentforrevs(rev, rev, df=fh)
2976
2976
2977 def doreadbatch():
2977 def doreadbatch():
2978 rl.clearcaches()
2978 rl.clearcaches()
2979 segmentforrevs(revs[0], revs[-1])
2979 segmentforrevs(revs[0], revs[-1])
2980
2980
2981 def doreadbatchcachedfh():
2981 def doreadbatchcachedfh():
2982 rl.clearcaches()
2982 rl.clearcaches()
2983 fh = rlfh(rl)
2983 fh = rlfh(rl)
2984 segmentforrevs(revs[0], revs[-1], df=fh)
2984 segmentforrevs(revs[0], revs[-1], df=fh)
2985
2985
2986 def dochunk():
2986 def dochunk():
2987 rl.clearcaches()
2987 rl.clearcaches()
2988 fh = rlfh(rl)
2988 fh = rlfh(rl)
2989 for rev in revs:
2989 for rev in revs:
2990 rl._chunk(rev, df=fh)
2990 rl._chunk(rev, df=fh)
2991
2991
2992 chunks = [None]
2992 chunks = [None]
2993
2993
2994 def dochunkbatch():
2994 def dochunkbatch():
2995 rl.clearcaches()
2995 rl.clearcaches()
2996 fh = rlfh(rl)
2996 fh = rlfh(rl)
2997 # Save chunks as a side-effect.
2997 # Save chunks as a side-effect.
2998 chunks[0] = rl._chunks(revs, df=fh)
2998 chunks[0] = rl._chunks(revs, df=fh)
2999
2999
3000 def docompress(compressor):
3000 def docompress(compressor):
3001 rl.clearcaches()
3001 rl.clearcaches()
3002
3002
3003 try:
3003 try:
3004 # Swap in the requested compression engine.
3004 # Swap in the requested compression engine.
3005 oldcompressor = rl._compressor
3005 oldcompressor = rl._compressor
3006 rl._compressor = compressor
3006 rl._compressor = compressor
3007 for chunk in chunks[0]:
3007 for chunk in chunks[0]:
3008 rl.compress(chunk)
3008 rl.compress(chunk)
3009 finally:
3009 finally:
3010 rl._compressor = oldcompressor
3010 rl._compressor = oldcompressor
3011
3011
3012 benches = [
3012 benches = [
3013 (lambda: doread(), b'read'),
3013 (lambda: doread(), b'read'),
3014 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3014 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3015 (lambda: doreadbatch(), b'read batch'),
3015 (lambda: doreadbatch(), b'read batch'),
3016 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3016 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3017 (lambda: dochunk(), b'chunk'),
3017 (lambda: dochunk(), b'chunk'),
3018 (lambda: dochunkbatch(), b'chunk batch'),
3018 (lambda: dochunkbatch(), b'chunk batch'),
3019 ]
3019 ]
3020
3020
3021 for engine in sorted(engines):
3021 for engine in sorted(engines):
3022 compressor = util.compengines[engine].revlogcompressor()
3022 compressor = util.compengines[engine].revlogcompressor()
3023 benches.append(
3023 benches.append(
3024 (
3024 (
3025 functools.partial(docompress, compressor),
3025 functools.partial(docompress, compressor),
3026 b'compress w/ %s' % engine,
3026 b'compress w/ %s' % engine,
3027 )
3027 )
3028 )
3028 )
3029
3029
3030 for fn, title in benches:
3030 for fn, title in benches:
3031 timer, fm = gettimer(ui, opts)
3031 timer, fm = gettimer(ui, opts)
3032 timer(fn, title=title)
3032 timer(fn, title=title)
3033 fm.end()
3033 fm.end()
3034
3034
3035
3035
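A note on the harness shape used by `perfrevlogchunks` above, and by most benchmarks in this file: each `do*` closure first clears the revlog caches and then exercises exactly one code path, so every timing isolates a single phase. A minimal sketch of that pattern, reusing the names from the function above (`gettimer` is defined earlier in perf.py):

    def doread():
        rl.clearcaches()              # start cold so repeated runs are comparable
        for rev in revs:
            segmentforrevs(rev, rev)  # the one operation under measurement

    timer, fm = gettimer(ui, opts)
    timer(doread, title=b'read')
    fm.end()                          # flush the formatter output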
3036 @command(
3036 @command(
3037 b'perfrevlogrevision',
3037 b'perfrevlogrevision',
3038 revlogopts
3038 revlogopts
3039 + formatteropts
3039 + formatteropts
3040 + [(b'', b'cache', False, b'use caches instead of clearing')],
3040 + [(b'', b'cache', False, b'use caches instead of clearing')],
3041 b'-c|-m|FILE REV',
3041 b'-c|-m|FILE REV',
3042 )
3042 )
3043 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3043 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3044 """Benchmark obtaining a revlog revision.
3044 """Benchmark obtaining a revlog revision.
3045
3045
3046 Obtaining a revlog revision consists of roughly the following steps:
3046 Obtaining a revlog revision consists of roughly the following steps:
3047
3047
3048 1. Compute the delta chain
3048 1. Compute the delta chain
3049 2. Slice the delta chain if applicable
3049 2. Slice the delta chain if applicable
3050 3. Obtain the raw chunks for that delta chain
3050 3. Obtain the raw chunks for that delta chain
3051 4. Decompress each raw chunk
3051 4. Decompress each raw chunk
3052 5. Apply binary patches to obtain fulltext
3052 5. Apply binary patches to obtain fulltext
3053 6. Verify hash of fulltext
3053 6. Verify hash of fulltext
3054
3054
3055 This command measures the time spent in each of these phases.
3055 This command measures the time spent in each of these phases.
3056 """
3056 """
3057 opts = _byteskwargs(opts)
3057 opts = _byteskwargs(opts)
3058
3058
3059 if opts.get(b'changelog') or opts.get(b'manifest'):
3059 if opts.get(b'changelog') or opts.get(b'manifest'):
3060 file_, rev = None, file_
3060 file_, rev = None, file_
3061 elif rev is None:
3061 elif rev is None:
3062 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3062 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3063
3063
3064 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3064 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3065
3065
3066 # _chunkraw was renamed to _getsegmentforrevs.
3066 # _chunkraw was renamed to _getsegmentforrevs.
3067 try:
3067 try:
3068 segmentforrevs = r._getsegmentforrevs
3068 segmentforrevs = r._getsegmentforrevs
3069 except AttributeError:
3069 except AttributeError:
3070 segmentforrevs = r._chunkraw
3070 segmentforrevs = r._chunkraw
3071
3071
3072 node = r.lookup(rev)
3072 node = r.lookup(rev)
3073 rev = r.rev(node)
3073 rev = r.rev(node)
3074
3074
3075 def getrawchunks(data, chain):
3075 def getrawchunks(data, chain):
3076 start = r.start
3076 start = r.start
3077 length = r.length
3077 length = r.length
3078 inline = r._inline
3078 inline = r._inline
3079 iosize = r._io.size
3079 iosize = r._io.size
3080 buffer = util.buffer
3080 buffer = util.buffer
3081
3081
3082 chunks = []
3082 chunks = []
3083 ladd = chunks.append
3083 ladd = chunks.append
3084 for idx, item in enumerate(chain):
3084 for idx, item in enumerate(chain):
3085 offset = start(item[0])
3085 offset = start(item[0])
3086 bits = data[idx]
3086 bits = data[idx]
3087 for rev in item:
3087 for rev in item:
3088 chunkstart = start(rev)
3088 chunkstart = start(rev)
3089 if inline:
3089 if inline:
3090 chunkstart += (rev + 1) * iosize
3090 chunkstart += (rev + 1) * iosize
3091 chunklength = length(rev)
3091 chunklength = length(rev)
3092 ladd(buffer(bits, chunkstart - offset, chunklength))
3092 ladd(buffer(bits, chunkstart - offset, chunklength))
3093
3093
3094 return chunks
3094 return chunks
3095
3095
3096 def dodeltachain(rev):
3096 def dodeltachain(rev):
3097 if not cache:
3097 if not cache:
3098 r.clearcaches()
3098 r.clearcaches()
3099 r._deltachain(rev)
3099 r._deltachain(rev)
3100
3100
3101 def doread(chain):
3101 def doread(chain):
3102 if not cache:
3102 if not cache:
3103 r.clearcaches()
3103 r.clearcaches()
3104 for item in slicedchain:
3104 for item in slicedchain:
3105 segmentforrevs(item[0], item[-1])
3105 segmentforrevs(item[0], item[-1])
3106
3106
3107 def doslice(r, chain, size):
3107 def doslice(r, chain, size):
3108 for s in slicechunk(r, chain, targetsize=size):
3108 for s in slicechunk(r, chain, targetsize=size):
3109 pass
3109 pass
3110
3110
3111 def dorawchunks(data, chain):
3111 def dorawchunks(data, chain):
3112 if not cache:
3112 if not cache:
3113 r.clearcaches()
3113 r.clearcaches()
3114 getrawchunks(data, chain)
3114 getrawchunks(data, chain)
3115
3115
3116 def dodecompress(chunks):
3116 def dodecompress(chunks):
3117 decomp = r.decompress
3117 decomp = r.decompress
3118 for chunk in chunks:
3118 for chunk in chunks:
3119 decomp(chunk)
3119 decomp(chunk)
3120
3120
3121 def dopatch(text, bins):
3121 def dopatch(text, bins):
3122 if not cache:
3122 if not cache:
3123 r.clearcaches()
3123 r.clearcaches()
3124 mdiff.patches(text, bins)
3124 mdiff.patches(text, bins)
3125
3125
3126 def dohash(text):
3126 def dohash(text):
3127 if not cache:
3127 if not cache:
3128 r.clearcaches()
3128 r.clearcaches()
3129 r.checkhash(text, node, rev=rev)
3129 r.checkhash(text, node, rev=rev)
3130
3130
3131 def dorevision():
3131 def dorevision():
3132 if not cache:
3132 if not cache:
3133 r.clearcaches()
3133 r.clearcaches()
3134 r.revision(node)
3134 r.revision(node)
3135
3135
3136 try:
3136 try:
3137 from mercurial.revlogutils.deltas import slicechunk
3137 from mercurial.revlogutils.deltas import slicechunk
3138 except ImportError:
3138 except ImportError:
3139 slicechunk = getattr(revlog, '_slicechunk', None)
3139 slicechunk = getattr(revlog, '_slicechunk', None)
3140
3140
3141 size = r.length(rev)
3141 size = r.length(rev)
3142 chain = r._deltachain(rev)[0]
3142 chain = r._deltachain(rev)[0]
3143 if not getattr(r, '_withsparseread', False):
3143 if not getattr(r, '_withsparseread', False):
3144 slicedchain = (chain,)
3144 slicedchain = (chain,)
3145 else:
3145 else:
3146 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3146 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3147 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3147 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3148 rawchunks = getrawchunks(data, slicedchain)
3148 rawchunks = getrawchunks(data, slicedchain)
3149 bins = r._chunks(chain)
3149 bins = r._chunks(chain)
3150 text = bytes(bins[0])
3150 text = bytes(bins[0])
3151 bins = bins[1:]
3151 bins = bins[1:]
3152 text = mdiff.patches(text, bins)
3152 text = mdiff.patches(text, bins)
3153
3153
3154 benches = [
3154 benches = [
3155 (lambda: dorevision(), b'full'),
3155 (lambda: dorevision(), b'full'),
3156 (lambda: dodeltachain(rev), b'deltachain'),
3156 (lambda: dodeltachain(rev), b'deltachain'),
3157 (lambda: doread(chain), b'read'),
3157 (lambda: doread(chain), b'read'),
3158 ]
3158 ]
3159
3159
3160 if getattr(r, '_withsparseread', False):
3160 if getattr(r, '_withsparseread', False):
3161 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3161 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3162 benches.append(slicing)
3162 benches.append(slicing)
3163
3163
3164 benches.extend(
3164 benches.extend(
3165 [
3165 [
3166 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3166 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3167 (lambda: dodecompress(rawchunks), b'decompress'),
3167 (lambda: dodecompress(rawchunks), b'decompress'),
3168 (lambda: dopatch(text, bins), b'patch'),
3168 (lambda: dopatch(text, bins), b'patch'),
3169 (lambda: dohash(text), b'hash'),
3169 (lambda: dohash(text), b'hash'),
3170 ]
3170 ]
3171 )
3171 )
3172
3172
3173 timer, fm = gettimer(ui, opts)
3173 timer, fm = gettimer(ui, opts)
3174 for fn, title in benches:
3174 for fn, title in benches:
3175 timer(fn, title=title)
3175 timer(fn, title=title)
3176 fm.end()
3176 fm.end()
3177
3177
3178
3178
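To make the phase breakdown in `perfrevlogrevision` concrete: once the raw chunks of the delta chain have been decompressed, the fulltext is rebuilt by patching the chain's base text with each subsequent delta and verifying the hash, exactly as the setup code above does before each phase is timed in isolation:

    bins = r._chunks(chain)               # one decompressed chunk per chain entry
    text = bytes(bins[0])                 # the base fulltext of the chain
    text = mdiff.patches(text, bins[1:])  # step 5: apply the deltas in order
    r.checkhash(text, node, rev=rev)      # step 6: verify the reconstructed text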
3179 @command(
3179 @command(
3180 b'perfrevset',
3180 b'perfrevset',
3181 [
3181 [
3182 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3182 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3183 (b'', b'contexts', False, b'obtain changectx for each revision'),
3183 (b'', b'contexts', False, b'obtain changectx for each revision'),
3184 ]
3184 ]
3185 + formatteropts,
3185 + formatteropts,
3186 b"REVSET",
3186 b"REVSET",
3187 )
3187 )
3188 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3188 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3189 """benchmark the execution time of a revset
3189 """benchmark the execution time of a revset
3190
3190
3191 Use the --clear option if you need to evaluate the impact of building the
3191 Use the --clear option if you need to evaluate the impact of building the
3192 volatile revision set caches on revset execution. The volatile caches hold
3192 volatile revision set caches on revset execution. The volatile caches hold
3193 filtering- and obsolescence-related data."""
3193 filtering- and obsolescence-related data."""
3194 opts = _byteskwargs(opts)
3194 opts = _byteskwargs(opts)
3195
3195
3196 timer, fm = gettimer(ui, opts)
3196 timer, fm = gettimer(ui, opts)
3197
3197
3198 def d():
3198 def d():
3199 if clear:
3199 if clear:
3200 repo.invalidatevolatilesets()
3200 repo.invalidatevolatilesets()
3201 if contexts:
3201 if contexts:
3202 for ctx in repo.set(expr):
3202 for ctx in repo.set(expr):
3203 pass
3203 pass
3204 else:
3204 else:
3205 for r in repo.revs(expr):
3205 for r in repo.revs(expr):
3206 pass
3206 pass
3207
3207
3208 timer(d)
3208 timer(d)
3209 fm.end()
3209 fm.end()
3210
3210
3211
3211
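As a usage note, `perfrevset` accepts any revset expression; a few illustrative invocations (the expressions themselves are arbitrary):

    $ hg perfrevset 'heads(all())'
    $ hg perfrevset 'tip::' --contexts      # also build a changectx per revision
    $ hg perfrevset 'not public()' --clear  # drop volatile caches between calls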
3212 @command(
3212 @command(
3213 b'perfvolatilesets',
3213 b'perfvolatilesets',
3214 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3214 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3215 + formatteropts,
3215 + formatteropts,
3216 )
3216 )
3217 def perfvolatilesets(ui, repo, *names, **opts):
3217 def perfvolatilesets(ui, repo, *names, **opts):
3218 """benchmark the computation of various volatile set
3218 """benchmark the computation of various volatile set
3219
3219
3220 Volatile set computes element related to filtering and obsolescence."""
3220 Volatile set computes element related to filtering and obsolescence."""
3221 opts = _byteskwargs(opts)
3221 opts = _byteskwargs(opts)
3222 timer, fm = gettimer(ui, opts)
3222 timer, fm = gettimer(ui, opts)
3223 repo = repo.unfiltered()
3223 repo = repo.unfiltered()
3224
3224
3225 def getobs(name):
3225 def getobs(name):
3226 def d():
3226 def d():
3227 repo.invalidatevolatilesets()
3227 repo.invalidatevolatilesets()
3228 if opts[b'clear_obsstore']:
3228 if opts[b'clear_obsstore']:
3229 clearfilecache(repo, b'obsstore')
3229 clearfilecache(repo, b'obsstore')
3230 obsolete.getrevs(repo, name)
3230 obsolete.getrevs(repo, name)
3231
3231
3232 return d
3232 return d
3233
3233
3234 allobs = sorted(obsolete.cachefuncs)
3234 allobs = sorted(obsolete.cachefuncs)
3235 if names:
3235 if names:
3236 allobs = [n for n in allobs if n in names]
3236 allobs = [n for n in allobs if n in names]
3237
3237
3238 for name in allobs:
3238 for name in allobs:
3239 timer(getobs(name), title=name)
3239 timer(getobs(name), title=name)
3240
3240
3241 def getfiltered(name):
3241 def getfiltered(name):
3242 def d():
3242 def d():
3243 repo.invalidatevolatilesets()
3243 repo.invalidatevolatilesets()
3244 if opts[b'clear_obsstore']:
3244 if opts[b'clear_obsstore']:
3245 clearfilecache(repo, b'obsstore')
3245 clearfilecache(repo, b'obsstore')
3246 repoview.filterrevs(repo, name)
3246 repoview.filterrevs(repo, name)
3247
3247
3248 return d
3248 return d
3249
3249
3250 allfilter = sorted(repoview.filtertable)
3250 allfilter = sorted(repoview.filtertable)
3251 if names:
3251 if names:
3252 allfilter = [n for n in allfilter if n in names]
3252 allfilter = [n for n in allfilter if n in names]
3253
3253
3254 for name in allfilter:
3254 for name in allfilter:
3255 timer(getfiltered(name), title=name)
3255 timer(getfiltered(name), title=name)
3256 fm.end()
3256 fm.end()
3257
3257
3258
3258
3259 @command(
3259 @command(
3260 b'perfbranchmap',
3260 b'perfbranchmap',
3261 [
3261 [
3262 (b'f', b'full', False, b'Includes build time of subset'),
3262 (b'f', b'full', False, b'Includes build time of subset'),
3263 (
3263 (
3264 b'',
3264 b'',
3265 b'clear-revbranch',
3265 b'clear-revbranch',
3266 False,
3266 False,
3267 b'purge the revbranch cache between computation',
3267 b'purge the revbranch cache between computation',
3268 ),
3268 ),
3269 ]
3269 ]
3270 + formatteropts,
3270 + formatteropts,
3271 )
3271 )
3272 def perfbranchmap(ui, repo, *filternames, **opts):
3272 def perfbranchmap(ui, repo, *filternames, **opts):
3273 """benchmark the update of a branchmap
3273 """benchmark the update of a branchmap
3274
3274
3275 This benchmarks the full repo.branchmap() call with read and write disabled
3275 This benchmarks the full repo.branchmap() call with read and write disabled
3276 """
3276 """
3277 opts = _byteskwargs(opts)
3277 opts = _byteskwargs(opts)
3278 full = opts.get(b"full", False)
3278 full = opts.get(b"full", False)
3279 clear_revbranch = opts.get(b"clear_revbranch", False)
3279 clear_revbranch = opts.get(b"clear_revbranch", False)
3280 timer, fm = gettimer(ui, opts)
3280 timer, fm = gettimer(ui, opts)
3281
3281
3282 def getbranchmap(filtername):
3282 def getbranchmap(filtername):
3283 """generate a benchmark function for the filtername"""
3283 """generate a benchmark function for the filtername"""
3284 if filtername is None:
3284 if filtername is None:
3285 view = repo
3285 view = repo
3286 else:
3286 else:
3287 view = repo.filtered(filtername)
3287 view = repo.filtered(filtername)
3288 if util.safehasattr(view._branchcaches, '_per_filter'):
3288 if util.safehasattr(view._branchcaches, '_per_filter'):
3289 filtered = view._branchcaches._per_filter
3289 filtered = view._branchcaches._per_filter
3290 else:
3290 else:
3291 # older versions
3291 # older versions
3292 filtered = view._branchcaches
3292 filtered = view._branchcaches
3293
3293
3294 def d():
3294 def d():
3295 if clear_revbranch:
3295 if clear_revbranch:
3296 repo.revbranchcache()._clear()
3296 repo.revbranchcache()._clear()
3297 if full:
3297 if full:
3298 view._branchcaches.clear()
3298 view._branchcaches.clear()
3299 else:
3299 else:
3300 filtered.pop(filtername, None)
3300 filtered.pop(filtername, None)
3301 view.branchmap()
3301 view.branchmap()
3302
3302
3303 return d
3303 return d
3304
3304
3305 # order filters from the smaller subsets to the bigger ones
3305 # order filters from the smaller subsets to the bigger ones
3306 possiblefilters = set(repoview.filtertable)
3306 possiblefilters = set(repoview.filtertable)
3307 if filternames:
3307 if filternames:
3308 possiblefilters &= set(filternames)
3308 possiblefilters &= set(filternames)
3309 subsettable = getbranchmapsubsettable()
3309 subsettable = getbranchmapsubsettable()
3310 allfilters = []
3310 allfilters = []
3311 while possiblefilters:
3311 while possiblefilters:
3312 for name in possiblefilters:
3312 for name in possiblefilters:
3313 subset = subsettable.get(name)
3313 subset = subsettable.get(name)
3314 if subset not in possiblefilters:
3314 if subset not in possiblefilters:
3315 break
3315 break
3316 else:
3316 else:
3317 assert False, b'subset cycle %s!' % possiblefilters
3317 assert False, b'subset cycle %s!' % possiblefilters
3318 allfilters.append(name)
3318 allfilters.append(name)
3319 possiblefilters.remove(name)
3319 possiblefilters.remove(name)
3320
3320
3321 # warm the cache
3321 # warm the cache
3322 if not full:
3322 if not full:
3323 for name in allfilters:
3323 for name in allfilters:
3324 repo.filtered(name).branchmap()
3324 repo.filtered(name).branchmap()
3325 if not filternames or b'unfiltered' in filternames:
3325 if not filternames or b'unfiltered' in filternames:
3326 # add unfiltered
3326 # add unfiltered
3327 allfilters.append(None)
3327 allfilters.append(None)
3328
3328
3329 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3329 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3330 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3330 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3331 branchcacheread.set(classmethod(lambda *args: None))
3331 branchcacheread.set(classmethod(lambda *args: None))
3332 else:
3332 else:
3333 # older versions
3333 # older versions
3334 branchcacheread = safeattrsetter(branchmap, b'read')
3334 branchcacheread = safeattrsetter(branchmap, b'read')
3335 branchcacheread.set(lambda *args: None)
3335 branchcacheread.set(lambda *args: None)
3336 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3336 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3337 branchcachewrite.set(lambda *args: None)
3337 branchcachewrite.set(lambda *args: None)
3338 try:
3338 try:
3339 for name in allfilters:
3339 for name in allfilters:
3340 printname = name
3340 printname = name
3341 if name is None:
3341 if name is None:
3342 printname = b'unfiltered'
3342 printname = b'unfiltered'
3343 timer(getbranchmap(name), title=str(printname))
3343 timer(getbranchmap(name), title=str(printname))
3344 finally:
3344 finally:
3345 branchcacheread.restore()
3345 branchcacheread.restore()
3346 branchcachewrite.restore()
3346 branchcachewrite.restore()
3347 fm.end()
3347 fm.end()
3348
3348
3349
3349
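The `while possiblefilters` loop in `perfbranchmap` above is a small topological sort: a filter is scheduled only after the smaller subset it builds on (per `subsettable`) has been scheduled, and a leftover cycle trips the assertion. A standalone sketch of the same ordering logic, with hypothetical names:

    def order_filters(names, subsettable):
        # Emit a name once the subset it extends has been emitted
        # (or lies outside the requested set of names).
        pending = set(names)
        ordered = []
        while pending:
            for name in pending:
                if subsettable.get(name) not in pending:
                    break
            else:
                raise ValueError('subset cycle: %r' % sorted(pending))
            ordered.append(name)
            pending.remove(name)
        return ordered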
3350 @command(
3350 @command(
3351 b'perfbranchmapupdate',
3351 b'perfbranchmapupdate',
3352 [
3352 [
3353 (b'', b'base', [], b'subset of revisions to start from'),
3353 (b'', b'base', [], b'subset of revisions to start from'),
3354 (b'', b'target', [], b'subset of revisions to end with'),
3354 (b'', b'target', [], b'subset of revisions to end with'),
3355 (b'', b'clear-caches', False, b'clear caches between each run'),
3355 (b'', b'clear-caches', False, b'clear caches between each run'),
3356 ]
3356 ]
3357 + formatteropts,
3357 + formatteropts,
3358 )
3358 )
3359 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3359 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3360 """benchmark branchmap update from for <base> revs to <target> revs
3360 """benchmark branchmap update from for <base> revs to <target> revs
3361
3361
3362 If `--clear-caches` is passed, the following items will be reset before
3362 If `--clear-caches` is passed, the following items will be reset before
3363 each update:
3363 each update:
3364 * the changelog instance and associated indexes
3364 * the changelog instance and associated indexes
3365 * the rev-branch-cache instance
3365 * the rev-branch-cache instance
3366
3366
3367 Examples:
3367 Examples:
3368
3368
3369 # update for the one last revision
3369 # update for the one last revision
3370 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3370 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3371
3371
3372 # update for a change coming with a new branch
3372 # update for a change coming with a new branch
3373 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3373 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3374 """
3374 """
3375 from mercurial import branchmap
3375 from mercurial import branchmap
3376 from mercurial import repoview
3376 from mercurial import repoview
3377
3377
3378 opts = _byteskwargs(opts)
3378 opts = _byteskwargs(opts)
3379 timer, fm = gettimer(ui, opts)
3379 timer, fm = gettimer(ui, opts)
3380 clearcaches = opts[b'clear_caches']
3380 clearcaches = opts[b'clear_caches']
3381 unfi = repo.unfiltered()
3381 unfi = repo.unfiltered()
3382 x = [None] # used to pass data between closures
3382 x = [None] # used to pass data between closures
3383
3383
3384 # we use a `list` here to avoid possible side effects from smartset
3384 # we use a `list` here to avoid possible side effects from smartset
3385 baserevs = list(scmutil.revrange(repo, base))
3385 baserevs = list(scmutil.revrange(repo, base))
3386 targetrevs = list(scmutil.revrange(repo, target))
3386 targetrevs = list(scmutil.revrange(repo, target))
3387 if not baserevs:
3387 if not baserevs:
3388 raise error.Abort(b'no revisions selected for --base')
3388 raise error.Abort(b'no revisions selected for --base')
3389 if not targetrevs:
3389 if not targetrevs:
3390 raise error.Abort(b'no revisions selected for --target')
3390 raise error.Abort(b'no revisions selected for --target')
3391
3391
3392 # make sure the target branchmap also contains the one in the base
3392 # make sure the target branchmap also contains the one in the base
3393 targetrevs = list(set(baserevs) | set(targetrevs))
3393 targetrevs = list(set(baserevs) | set(targetrevs))
3394 targetrevs.sort()
3394 targetrevs.sort()
3395
3395
3396 cl = repo.changelog
3396 cl = repo.changelog
3397 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3397 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3398 allbaserevs.sort()
3398 allbaserevs.sort()
3399 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3399 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3400
3400
3401 newrevs = list(alltargetrevs.difference(allbaserevs))
3401 newrevs = list(alltargetrevs.difference(allbaserevs))
3402 newrevs.sort()
3402 newrevs.sort()
3403
3403
3404 allrevs = frozenset(unfi.changelog.revs())
3404 allrevs = frozenset(unfi.changelog.revs())
3405 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3405 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3406 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3406 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3407
3407
3408 def basefilter(repo, visibilityexceptions=None):
3408 def basefilter(repo, visibilityexceptions=None):
3409 return basefilterrevs
3409 return basefilterrevs
3410
3410
3411 def targetfilter(repo, visibilityexceptions=None):
3411 def targetfilter(repo, visibilityexceptions=None):
3412 return targetfilterrevs
3412 return targetfilterrevs
3413
3413
3414 msg = b'benchmark of branchmap with %d revisions and %d new ones\n'
3414 msg = b'benchmark of branchmap with %d revisions and %d new ones\n'
3415 ui.status(msg % (len(allbaserevs), len(newrevs)))
3415 ui.status(msg % (len(allbaserevs), len(newrevs)))
3416 if targetfilterrevs:
3416 if targetfilterrevs:
3417 msg = b'(%d revisions still filtered)\n'
3417 msg = b'(%d revisions still filtered)\n'
3418 ui.status(msg % len(targetfilterrevs))
3418 ui.status(msg % len(targetfilterrevs))
3419
3419
3420 try:
3420 try:
3421 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3421 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3422 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3422 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3423
3423
3424 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3424 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3425 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3425 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3426
3426
3427 # try to find an existing branchmap to reuse
3427 # try to find an existing branchmap to reuse
3428 subsettable = getbranchmapsubsettable()
3428 subsettable = getbranchmapsubsettable()
3429 candidatefilter = subsettable.get(None)
3429 candidatefilter = subsettable.get(None)
3430 while candidatefilter is not None:
3430 while candidatefilter is not None:
3431 candidatebm = repo.filtered(candidatefilter).branchmap()
3431 candidatebm = repo.filtered(candidatefilter).branchmap()
3432 if candidatebm.validfor(baserepo):
3432 if candidatebm.validfor(baserepo):
3433 filtered = repoview.filterrevs(repo, candidatefilter)
3433 filtered = repoview.filterrevs(repo, candidatefilter)
3434 missing = [r for r in allbaserevs if r in filtered]
3434 missing = [r for r in allbaserevs if r in filtered]
3435 base = candidatebm.copy()
3435 base = candidatebm.copy()
3436 base.update(baserepo, missing)
3436 base.update(baserepo, missing)
3437 break
3437 break
3438 candidatefilter = subsettable.get(candidatefilter)
3438 candidatefilter = subsettable.get(candidatefilter)
3439 else:
3439 else:
3440 # no suitable subset was found
3440 # no suitable subset was found
3441 base = branchmap.branchcache()
3441 base = branchmap.branchcache()
3442 base.update(baserepo, allbaserevs)
3442 base.update(baserepo, allbaserevs)
3443
3443
3444 def setup():
3444 def setup():
3445 x[0] = base.copy()
3445 x[0] = base.copy()
3446 if clearcaches:
3446 if clearcaches:
3447 unfi._revbranchcache = None
3447 unfi._revbranchcache = None
3448 clearchangelog(repo)
3448 clearchangelog(repo)
3449
3449
3450 def bench():
3450 def bench():
3451 x[0].update(targetrepo, newrevs)
3451 x[0].update(targetrepo, newrevs)
3452
3452
3453 timer(bench, setup=setup)
3453 timer(bench, setup=setup)
3454 fm.end()
3454 fm.end()
3455 finally:
3455 finally:
3456 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3456 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3457 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3457 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3458
3458
3459
3459
3460 @command(
3460 @command(
3461 b'perfbranchmapload',
3461 b'perfbranchmapload',
3462 [
3462 [
3463 (b'f', b'filter', b'', b'Specify repoview filter'),
3463 (b'f', b'filter', b'', b'Specify repoview filter'),
3464 (b'', b'list', False, b'List branchmap filter caches'),
3464 (b'', b'list', False, b'List branchmap filter caches'),
3465 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3465 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3466 ]
3466 ]
3467 + formatteropts,
3467 + formatteropts,
3468 )
3468 )
3469 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3469 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3470 """benchmark reading the branchmap"""
3470 """benchmark reading the branchmap"""
3471 opts = _byteskwargs(opts)
3471 opts = _byteskwargs(opts)
3472 clearrevlogs = opts[b'clear_revlogs']
3472 clearrevlogs = opts[b'clear_revlogs']
3473
3473
3474 if list:
3474 if list:
3475 for name, kind, st in repo.cachevfs.readdir(stat=True):
3475 for name, kind, st in repo.cachevfs.readdir(stat=True):
3476 if name.startswith(b'branch2'):
3476 if name.startswith(b'branch2'):
3477 filtername = name.partition(b'-')[2] or b'unfiltered'
3477 filtername = name.partition(b'-')[2] or b'unfiltered'
3478 ui.status(
3478 ui.status(
3479 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3479 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3480 )
3480 )
3481 return
3481 return
3482 if not filter:
3482 if not filter:
3483 filter = None
3483 filter = None
3484 subsettable = getbranchmapsubsettable()
3484 subsettable = getbranchmapsubsettable()
3485 if filter is None:
3485 if filter is None:
3486 repo = repo.unfiltered()
3486 repo = repo.unfiltered()
3487 else:
3487 else:
3488 repo = repoview.repoview(repo, filter)
3488 repo = repoview.repoview(repo, filter)
3489
3489
3490 repo.branchmap() # make sure we have a relevant, up to date branchmap
3490 repo.branchmap() # make sure we have a relevant, up to date branchmap
3491
3491
3492 try:
3492 try:
3493 fromfile = branchmap.branchcache.fromfile
3493 fromfile = branchmap.branchcache.fromfile
3494 except AttributeError:
3494 except AttributeError:
3495 # older versions
3495 # older versions
3496 fromfile = branchmap.read
3496 fromfile = branchmap.read
3497
3497
3498 currentfilter = filter
3498 currentfilter = filter
3499 # try once without the timer; the filter may not be cached
3499 # try once without the timer; the filter may not be cached
3500 while fromfile(repo) is None:
3500 while fromfile(repo) is None:
3501 currentfilter = subsettable.get(currentfilter)
3501 currentfilter = subsettable.get(currentfilter)
3502 if currentfilter is None:
3502 if currentfilter is None:
3503 raise error.Abort(
3503 raise error.Abort(
3504 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3504 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3505 )
3505 )
3506 repo = repo.filtered(currentfilter)
3506 repo = repo.filtered(currentfilter)
3507 timer, fm = gettimer(ui, opts)
3507 timer, fm = gettimer(ui, opts)
3508
3508
3509 def setup():
3509 def setup():
3510 if clearrevlogs:
3510 if clearrevlogs:
3511 clearchangelog(repo)
3511 clearchangelog(repo)
3512
3512
3513 def bench():
3513 def bench():
3514 fromfile(repo)
3514 fromfile(repo)
3515
3515
3516 timer(bench, setup=setup)
3516 timer(bench, setup=setup)
3517 fm.end()
3517 fm.end()
3518
3518
3519
3519
3520 @command(b'perfloadmarkers')
3520 @command(b'perfloadmarkers')
3521 def perfloadmarkers(ui, repo):
3521 def perfloadmarkers(ui, repo):
3522 """benchmark the time to parse the on-disk markers for a repo
3522 """benchmark the time to parse the on-disk markers for a repo
3523
3523
3524 Result is the number of markers in the repo."""
3524 Result is the number of markers in the repo."""
3525 timer, fm = gettimer(ui)
3525 timer, fm = gettimer(ui)
3526 svfs = getsvfs(repo)
3526 svfs = getsvfs(repo)
3527 timer(lambda: len(obsolete.obsstore(svfs)))
3527 timer(lambda: len(obsolete.obsstore(svfs)))
3528 fm.end()
3528 fm.end()
3529
3529
3530
3530
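The cache benchmarks below rely on a small `util.lrucachedict` surface, all of it visible in the closures themselves; a short sketch of the operations involved (keys and costs are illustrative):

    d = util.lrucachedict(4)        # capacity-bounded LRU mapping
    d[b'a'] = 1                     # plain set; may evict the LRU entry
    value = d[b'a']                 # get; raises KeyError on a miss

    dc = util.lrucachedict(4, maxcost=100)
    dc.insert(b'b', 2, cost=30)     # cost-aware insert; eviction keeps the
                                    # total cost at or under maxcost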
3531 @command(
3531 @command(
3532 b'perflrucachedict',
3532 b'perflrucachedict',
3533 formatteropts
3533 formatteropts
3534 + [
3534 + [
3535 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3535 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3536 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3536 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3537 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3537 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3538 (b'', b'size', 4, b'size of cache'),
3538 (b'', b'size', 4, b'size of cache'),
3539 (b'', b'gets', 10000, b'number of key lookups'),
3539 (b'', b'gets', 10000, b'number of key lookups'),
3540 (b'', b'sets', 10000, b'number of key sets'),
3540 (b'', b'sets', 10000, b'number of key sets'),
3541 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3541 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3542 (
3542 (
3543 b'',
3543 b'',
3544 b'mixedgetfreq',
3544 b'mixedgetfreq',
3545 50,
3545 50,
3546 b'frequency of get vs set ops in mixed mode',
3546 b'frequency of get vs set ops in mixed mode',
3547 ),
3547 ),
3548 ],
3548 ],
3549 norepo=True,
3549 norepo=True,
3550 )
3550 )
3551 def perflrucache(
3551 def perflrucache(
3552 ui,
3552 ui,
3553 mincost=0,
3553 mincost=0,
3554 maxcost=100,
3554 maxcost=100,
3555 costlimit=0,
3555 costlimit=0,
3556 size=4,
3556 size=4,
3557 gets=10000,
3557 gets=10000,
3558 sets=10000,
3558 sets=10000,
3559 mixed=10000,
3559 mixed=10000,
3560 mixedgetfreq=50,
3560 mixedgetfreq=50,
3561 **opts
3561 **opts
3562 ):
3562 ):
3563 opts = _byteskwargs(opts)
3563 opts = _byteskwargs(opts)
3564
3564
3565 def doinit():
3565 def doinit():
3566 for i in _xrange(10000):
3566 for i in _xrange(10000):
3567 util.lrucachedict(size)
3567 util.lrucachedict(size)
3568
3568
3569 costrange = list(range(mincost, maxcost + 1))
3569 costrange = list(range(mincost, maxcost + 1))
3570
3570
3571 values = []
3571 values = []
3572 for i in _xrange(size):
3572 for i in _xrange(size):
3573 values.append(random.randint(0, _maxint))
3573 values.append(random.randint(0, _maxint))
3574
3574
3575 # Get mode fills the cache and tests raw lookup performance with no
3575 # Get mode fills the cache and tests raw lookup performance with no
3576 # eviction.
3576 # eviction.
3577 getseq = []
3577 getseq = []
3578 for i in _xrange(gets):
3578 for i in _xrange(gets):
3579 getseq.append(random.choice(values))
3579 getseq.append(random.choice(values))
3580
3580
3581 def dogets():
3581 def dogets():
3582 d = util.lrucachedict(size)
3582 d = util.lrucachedict(size)
3583 for v in values:
3583 for v in values:
3584 d[v] = v
3584 d[v] = v
3585 for key in getseq:
3585 for key in getseq:
3586 value = d[key]
3586 value = d[key]
3587 value # silence pyflakes warning
3587 value # silence pyflakes warning
3588
3588
3589 def dogetscost():
3589 def dogetscost():
3590 d = util.lrucachedict(size, maxcost=costlimit)
3590 d = util.lrucachedict(size, maxcost=costlimit)
3591 for i, v in enumerate(values):
3591 for i, v in enumerate(values):
3592 d.insert(v, v, cost=costs[i])
3592 d.insert(v, v, cost=costs[i])
3593 for key in getseq:
3593 for key in getseq:
3594 try:
3594 try:
3595 value = d[key]
3595 value = d[key]
3596 value # silence pyflakes warning
3596 value # silence pyflakes warning
3597 except KeyError:
3597 except KeyError:
3598 pass
3598 pass
3599
3599
3600 # Set mode tests insertion speed with cache eviction.
3600 # Set mode tests insertion speed with cache eviction.
3601 setseq = []
3601 setseq = []
3602 costs = []
3602 costs = []
3603 for i in _xrange(sets):
3603 for i in _xrange(sets):
3604 setseq.append(random.randint(0, _maxint))
3604 setseq.append(random.randint(0, _maxint))
3605 costs.append(random.choice(costrange))
3605 costs.append(random.choice(costrange))
3606
3606
3607 def doinserts():
3607 def doinserts():
3608 d = util.lrucachedict(size)
3608 d = util.lrucachedict(size)
3609 for v in setseq:
3609 for v in setseq:
3610 d.insert(v, v)
3610 d.insert(v, v)
3611
3611
3612 def doinsertscost():
3612 def doinsertscost():
3613 d = util.lrucachedict(size, maxcost=costlimit)
3613 d = util.lrucachedict(size, maxcost=costlimit)
3614 for i, v in enumerate(setseq):
3614 for i, v in enumerate(setseq):
3615 d.insert(v, v, cost=costs[i])
3615 d.insert(v, v, cost=costs[i])
3616
3616
3617 def dosets():
3617 def dosets():
3618 d = util.lrucachedict(size)
3618 d = util.lrucachedict(size)
3619 for v in setseq:
3619 for v in setseq:
3620 d[v] = v
3620 d[v] = v
3621
3621
3622 # Mixed mode randomly performs gets and sets with eviction.
3622 # Mixed mode randomly performs gets and sets with eviction.
3623 mixedops = []
3623 mixedops = []
3624 for i in _xrange(mixed):
3624 for i in _xrange(mixed):
3625 r = random.randint(0, 100)
3625 r = random.randint(0, 100)
3626 if r < mixedgetfreq:
3626 if r < mixedgetfreq:
3627 op = 0
3627 op = 0
3628 else:
3628 else:
3629 op = 1
3629 op = 1
3630
3630
3631 mixedops.append(
3631 mixedops.append(
3632 (op, random.randint(0, size * 2), random.choice(costrange))
3632 (op, random.randint(0, size * 2), random.choice(costrange))
3633 )
3633 )
3634
3634
3635 def domixed():
3635 def domixed():
3636 d = util.lrucachedict(size)
3636 d = util.lrucachedict(size)
3637
3637
3638 for op, v, cost in mixedops:
3638 for op, v, cost in mixedops:
3639 if op == 0:
3639 if op == 0:
3640 try:
3640 try:
3641 d[v]
3641 d[v]
3642 except KeyError:
3642 except KeyError:
3643 pass
3643 pass
3644 else:
3644 else:
3645 d[v] = v
3645 d[v] = v
3646
3646
3647 def domixedcost():
3647 def domixedcost():
3648 d = util.lrucachedict(size, maxcost=costlimit)
3648 d = util.lrucachedict(size, maxcost=costlimit)
3649
3649
3650 for op, v, cost in mixedops:
3650 for op, v, cost in mixedops:
3651 if op == 0:
3651 if op == 0:
3652 try:
3652 try:
3653 d[v]
3653 d[v]
3654 except KeyError:
3654 except KeyError:
3655 pass
3655 pass
3656 else:
3656 else:
3657 d.insert(v, v, cost=cost)
3657 d.insert(v, v, cost=cost)
3658
3658
3659 benches = [
3659 benches = [
3660 (doinit, b'init'),
3660 (doinit, b'init'),
3661 ]
3661 ]
3662
3662
3663 if costlimit:
3663 if costlimit:
3664 benches.extend(
3664 benches.extend(
3665 [
3665 [
3666 (dogetscost, b'gets w/ cost limit'),
3666 (dogetscost, b'gets w/ cost limit'),
3667 (doinsertscost, b'inserts w/ cost limit'),
3667 (doinsertscost, b'inserts w/ cost limit'),
3668 (domixedcost, b'mixed w/ cost limit'),
3668 (domixedcost, b'mixed w/ cost limit'),
3669 ]
3669 ]
3670 )
3670 )
3671 else:
3671 else:
3672 benches.extend(
3672 benches.extend(
3673 [
3673 [
3674 (dogets, b'gets'),
3674 (dogets, b'gets'),
3675 (doinserts, b'inserts'),
3675 (doinserts, b'inserts'),
3676 (dosets, b'sets'),
3676 (dosets, b'sets'),
3677 (domixed, b'mixed'),
3677 (domixed, b'mixed'),
3678 ]
3678 ]
3679 )
3679 )
3680
3680
3681 for fn, title in benches:
3681 for fn, title in benches:
3682 timer, fm = gettimer(ui, opts)
3682 timer, fm = gettimer(ui, opts)
3683 timer(fn, title=title)
3683 timer(fn, title=title)
3684 fm.end()
3684 fm.end()
3685
3685
3686
3686
3687 @command(b'perfwrite', formatteropts)
3687 @command(b'perfwrite', formatteropts)
3688 def perfwrite(ui, repo, **opts):
3688 def perfwrite(ui, repo, **opts):
3689 """microbenchmark ui.write
3689 """microbenchmark ui.write
3690 """
3690 """
3691 opts = _byteskwargs(opts)
3691 opts = _byteskwargs(opts)
3692
3692
3693 timer, fm = gettimer(ui, opts)
3693 timer, fm = gettimer(ui, opts)
3694
3694
3695 def write():
3695 def write():
3696 for i in range(100000):
3696 for i in range(100000):
3697 ui.write(b'Testing write performance\n')
3697 ui.writenoi18n(b'Testing write performance\n')
3698
3698
3699 timer(write)
3699 timer(write)
3700 fm.end()
3700 fm.end()
3701
3701
3702
3702
3703 def uisetup(ui):
3703 def uisetup(ui):
3704 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3704 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3705 commands, b'debugrevlogopts'
3705 commands, b'debugrevlogopts'
3706 ):
3706 ):
3707 # for "historical portability":
3707 # for "historical portability":
3708 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3708 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3709 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3709 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3710 # openrevlog() should cause failure, because it has been
3710 # openrevlog() should cause failure, because it has been
3711 # available since 3.5 (or 49c583ca48c4).
3711 # available since 3.5 (or 49c583ca48c4).
3712 def openrevlog(orig, repo, cmd, file_, opts):
3712 def openrevlog(orig, repo, cmd, file_, opts):
3713 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3713 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3714 raise error.Abort(
3714 raise error.Abort(
3715 b"This version doesn't support --dir option",
3715 b"This version doesn't support --dir option",
3716 hint=b"use 3.5 or later",
3716 hint=b"use 3.5 or later",
3717 )
3717 )
3718 return orig(repo, cmd, file_, opts)
3718 return orig(repo, cmd, file_, opts)
3719
3719
3720 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3720 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3721
3721
3722
3722
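The `uisetup` hook above uses the standard extension-wrapping idiom: the replacement receives the original callable as its first argument and decides whether to delegate. A minimal sketch of the idiom; `somemodule` and `somefunc` are hypothetical placeholders:

    def wrapped(orig, *args, **kwargs):
        # pre-flight checks or argument rewriting would go here
        return orig(*args, **kwargs)  # delegate to the wrapped function

    extensions.wrapfunction(somemodule, b'somefunc', wrapped)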
3723 @command(
3723 @command(
3724 b'perfprogress',
3724 b'perfprogress',
3725 formatteropts
3725 formatteropts
3726 + [
3726 + [
3727 (b'', b'topic', b'topic', b'topic for progress messages'),
3727 (b'', b'topic', b'topic', b'topic for progress messages'),
3728 (b'c', b'total', 1000000, b'total value we are progressing to'),
3728 (b'c', b'total', 1000000, b'total value we are progressing to'),
3729 ],
3729 ],
3730 norepo=True,
3730 norepo=True,
3731 )
3731 )
3732 def perfprogress(ui, topic=None, total=None, **opts):
3732 def perfprogress(ui, topic=None, total=None, **opts):
3733 """printing of progress bars"""
3733 """printing of progress bars"""
3734 opts = _byteskwargs(opts)
3734 opts = _byteskwargs(opts)
3735
3735
3736 timer, fm = gettimer(ui, opts)
3736 timer, fm = gettimer(ui, opts)
3737
3737
3738 def doprogress():
3738 def doprogress():
3739 with ui.makeprogress(topic, total=total) as progress:
3739 with ui.makeprogress(topic, total=total) as progress:
3740 for i in _xrange(total):
3740 for i in _xrange(total):
3741 progress.increment()
3741 progress.increment()
3742
3742
3743 timer(doprogress)
3743 timer(doprogress)
3744 fm.end()
3744 fm.end()
@@ -1,227 +1,227 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # checkseclevel - checking section title levels in each online help document
3 # checkseclevel - checking section title levels in each online help document
4
4
5 from __future__ import absolute_import
5 from __future__ import absolute_import
6
6
7 import optparse
7 import optparse
8 import os
8 import os
9 import sys
9 import sys
10
10
11 # import from the live mercurial repo
11 # import from the live mercurial repo
12 os.environ['HGMODULEPOLICY'] = 'py'
12 os.environ['HGMODULEPOLICY'] = 'py'
13 sys.path.insert(0, "..")
13 sys.path.insert(0, "..")
14 from mercurial import demandimport
14 from mercurial import demandimport
15
15
16 demandimport.enable()
16 demandimport.enable()
17 from mercurial import (
17 from mercurial import (
18 commands,
18 commands,
19 extensions,
19 extensions,
20 help,
20 help,
21 minirst,
21 minirst,
22 ui as uimod,
22 ui as uimod,
23 )
23 )
24
24
25 table = commands.table
25 table = commands.table
26 helptable = help.helptable
26 helptable = help.helptable
27
27
28 level2mark = [b'"', b'=', b'-', b'.', b'#']
28 level2mark = [b'"', b'=', b'-', b'.', b'#']
29 reservedmarks = [b'"']
29 reservedmarks = [b'"']
30
30
31 mark2level = {}
31 mark2level = {}
32 for m, l in zip(level2mark, range(len(level2mark))):
32 for m, l in zip(level2mark, range(len(level2mark))):
33 if m not in reservedmarks:
33 if m not in reservedmarks:
34 mark2level[m] = l
34 mark2level[m] = l
35
35
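For reference, with the tables above the loop leaves the mapping as:

    # mark2level == {b'=': 1, b'-': 2, b'.': 3, b'#': 4}
    # (b'"' is excluded because it is listed in reservedmarks)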
36 initlevel_topic = 0
36 initlevel_topic = 0
37 initlevel_cmd = 1
37 initlevel_cmd = 1
38 initlevel_ext = 1
38 initlevel_ext = 1
39 initlevel_ext_cmd = 3
39 initlevel_ext_cmd = 3
40
40
41
41
42 def showavailables(ui, initlevel):
42 def showavailables(ui, initlevel):
43 avail = ' available marks and their order in this help: %s\n' % (
43 avail = ' available marks and their order in this help: %s\n' % (
44 ', '.join(['%r' % (m * 4) for m in level2mark[initlevel + 1 :]])
44 ', '.join(['%r' % (m * 4) for m in level2mark[initlevel + 1 :]])
45 )
45 )
46 ui.warn(avail.encode('utf-8'))
46 ui.warn(avail.encode('utf-8'))
47
47
48
48
49 def checkseclevel(ui, doc, name, initlevel):
49 def checkseclevel(ui, doc, name, initlevel):
50 ui.note('checking "%s"\n' % name)
50 ui.notenoi18n('checking "%s"\n' % name)
51 if not isinstance(doc, bytes):
51 if not isinstance(doc, bytes):
52 doc = doc.encode('utf-8')
52 doc = doc.encode('utf-8')
53 blocks, pruned = minirst.parse(doc, 0, ['verbose'])
53 blocks, pruned = minirst.parse(doc, 0, ['verbose'])
54 errorcnt = 0
54 errorcnt = 0
55 curlevel = initlevel
55 curlevel = initlevel
56 for block in blocks:
56 for block in blocks:
57 if block[b'type'] != b'section':
57 if block[b'type'] != b'section':
58 continue
58 continue
59 mark = block[b'underline']
59 mark = block[b'underline']
60 title = block[b'lines'][0]
60 title = block[b'lines'][0]
61 if (mark not in mark2level) or (mark2level[mark] <= initlevel):
61 if (mark not in mark2level) or (mark2level[mark] <= initlevel):
62 ui.warn(
62 ui.warn(
63 (
63 (
64 'invalid section mark %r for "%s" of %s\n'
64 'invalid section mark %r for "%s" of %s\n'
65 % (mark * 4, title, name)
65 % (mark * 4, title, name)
66 ).encode('utf-8')
66 ).encode('utf-8')
67 )
67 )
68 showavailables(ui, initlevel)
68 showavailables(ui, initlevel)
69 errorcnt += 1
69 errorcnt += 1
70 continue
70 continue
71 nextlevel = mark2level[mark]
71 nextlevel = mark2level[mark]
72 if curlevel < nextlevel and curlevel + 1 != nextlevel:
72 if curlevel < nextlevel and curlevel + 1 != nextlevel:
73 ui.warn('gap of section level at "%s" of %s\n' % (title, name))
73 ui.warnnoi18n('gap of section level at "%s" of %s\n' % (title, name))
74 showavailables(ui, initlevel)
74 showavailables(ui, initlevel)
75 errorcnt += 1
75 errorcnt += 1
76 continue
76 continue
77 ui.note(
77 ui.notenoi18n(
78 'appropriate section level for "%s %s"\n'
78 'appropriate section level for "%s %s"\n'
79 % (mark * (nextlevel * 2), title)
79 % (mark * (nextlevel * 2), title)
80 )
80 )
81 curlevel = nextlevel
81 curlevel = nextlevel
82
82
83 return errorcnt
83 return errorcnt
84
84
85
85
86 def checkcmdtable(ui, cmdtable, namefmt, initlevel):
86 def checkcmdtable(ui, cmdtable, namefmt, initlevel):
87 errorcnt = 0
87 errorcnt = 0
88 for k, entry in cmdtable.items():
88 for k, entry in cmdtable.items():
89 name = k.split(b"|")[0].lstrip(b"^")
89 name = k.split(b"|")[0].lstrip(b"^")
90 if not entry[0].__doc__:
90 if not entry[0].__doc__:
91 ui.note('skip checking %s: no help document\n' % (namefmt % name))
91 ui.notenoi18n('skip checking %s: no help document\n' % (namefmt % name))
92 continue
92 continue
93 errorcnt += checkseclevel(
93 errorcnt += checkseclevel(
94 ui, entry[0].__doc__, namefmt % name, initlevel
94 ui, entry[0].__doc__, namefmt % name, initlevel
95 )
95 )
96 return errorcnt
96 return errorcnt
97
97
98
98
99 def checkhghelps(ui):
99 def checkhghelps(ui):
100 errorcnt = 0
100 errorcnt = 0
101 for h in helptable:
101 for h in helptable:
102 names, sec, doc = h[0:3]
102 names, sec, doc = h[0:3]
103 if callable(doc):
103 if callable(doc):
104 doc = doc(ui)
104 doc = doc(ui)
105 errorcnt += checkseclevel(
105 errorcnt += checkseclevel(
106 ui, doc, '%s help topic' % names[0], initlevel_topic
106 ui, doc, '%s help topic' % names[0], initlevel_topic
107 )
107 )
108
108
109 errorcnt += checkcmdtable(ui, table, '%s command', initlevel_cmd)
109 errorcnt += checkcmdtable(ui, table, '%s command', initlevel_cmd)
110
110
111 for name in sorted(
111 for name in sorted(
112 list(extensions.enabled()) + list(extensions.disabled())
112 list(extensions.enabled()) + list(extensions.disabled())
113 ):
113 ):
114 mod = extensions.load(ui, name, None)
114 mod = extensions.load(ui, name, None)
115 if not mod.__doc__:
115 if not mod.__doc__:
116 ui.note('skip checking %s extension: no help document\n' % name)
116 ui.notenoi18n('skip checking %s extension: no help document\n' % name)
117 continue
117 continue
118 errorcnt += checkseclevel(
118 errorcnt += checkseclevel(
119 ui, mod.__doc__, '%s extension' % name, initlevel_ext
119 ui, mod.__doc__, '%s extension' % name, initlevel_ext
120 )
120 )
121
121
122 cmdtable = getattr(mod, 'cmdtable', None)
122 cmdtable = getattr(mod, 'cmdtable', None)
123 if cmdtable:
123 if cmdtable:
124 errorcnt += checkcmdtable(
124 errorcnt += checkcmdtable(
125 ui,
125 ui,
126 cmdtable,
126 cmdtable,
127 '%%s command of %s extension' % name,
127 '%%s command of %s extension' % name,
128 initlevel_ext_cmd,
128 initlevel_ext_cmd,
129 )
129 )
130 return errorcnt
130 return errorcnt
131
131
132
132
133 def checkfile(ui, filename, initlevel):
133 def checkfile(ui, filename, initlevel):
134 if filename == '-':
134 if filename == '-':
135 filename = 'stdin'
135 filename = 'stdin'
136 doc = sys.stdin.read()
136 doc = sys.stdin.read()
137 else:
137 else:
138 with open(filename) as fp:
138 with open(filename) as fp:
139 doc = fp.read()
139 doc = fp.read()
140
140
141 ui.note(
141 ui.notenoi18n(
142 'checking input from %s with initlevel %d\n' % (filename, initlevel)
142 'checking input from %s with initlevel %d\n' % (filename, initlevel)
143 )
143 )
144 return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)
144 return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)
145
145
146
146
147 def main():
147 def main():
148 optparser = optparse.OptionParser(
148 optparser = optparse.OptionParser(
149 """%prog [options]
149 """%prog [options]
150
150
151 This checks all help documents of Mercurial (topics, commands,
151 This checks all help documents of Mercurial (topics, commands,
152 extensions and their commands), unless a file is specified with the
152 extensions and their commands), unless a file is specified with the
153 --file option.
153 --file option.
154 """
154 """
155 )
155 )
156 optparser.add_option(
156 optparser.add_option(
157 "-v", "--verbose", help="enable additional output", action="store_true"
157 "-v", "--verbose", help="enable additional output", action="store_true"
158 )
158 )
159 optparser.add_option(
159 optparser.add_option(
160 "-d", "--debug", help="debug mode", action="store_true"
160 "-d", "--debug", help="debug mode", action="store_true"
161 )
161 )
162 optparser.add_option(
162 optparser.add_option(
163 "-f",
163 "-f",
164 "--file",
164 "--file",
165 help="filename to read in (or '-' for stdin)",
165 help="filename to read in (or '-' for stdin)",
166 action="store",
166 action="store",
167 default="",
167 default="",
168 )
168 )
169
169
170 optparser.add_option(
170 optparser.add_option(
171 "-t",
171 "-t",
172 "--topic",
172 "--topic",
173 help="parse file as help topic",
173 help="parse file as help topic",
174 action="store_const",
174 action="store_const",
175 dest="initlevel",
175 dest="initlevel",
176 const=0,
176 const=0,
177 )
177 )
178 optparser.add_option(
178 optparser.add_option(
179 "-c",
179 "-c",
180 "--command",
180 "--command",
181 help="parse file as help of core command",
181 help="parse file as help of core command",
182 action="store_const",
182 action="store_const",
183 dest="initlevel",
183 dest="initlevel",
184 const=1,
184 const=1,
185 )
185 )
186 optparser.add_option(
186 optparser.add_option(
187 "-e",
187 "-e",
188 "--extension",
188 "--extension",
189 help="parse file as help of extension",
189 help="parse file as help of extension",
190 action="store_const",
190 action="store_const",
191 dest="initlevel",
191 dest="initlevel",
192 const=1,
192 const=1,
193 )
193 )
194 optparser.add_option(
194 optparser.add_option(
195 "-C",
195 "-C",
196 "--extension-command",
196 "--extension-command",
197 help="parse file as help of extension command",
197 help="parse file as help of extension command",
198 action="store_const",
198 action="store_const",
199 dest="initlevel",
199 dest="initlevel",
200 const=3,
200 const=3,
201 )
201 )
202
202
203 optparser.add_option(
203 optparser.add_option(
204 "-l",
204 "-l",
205 "--initlevel",
205 "--initlevel",
206 help="set initial section level manually",
206 help="set initial section level manually",
207 action="store",
207 action="store",
208 type="int",
208 type="int",
209 default=0,
209 default=0,
210 )
210 )
211
211
212 (options, args) = optparser.parse_args()
212 (options, args) = optparser.parse_args()
213
213
214 ui = uimod.ui.load()
214 ui = uimod.ui.load()
215 ui.setconfig(b'ui', b'verbose', options.verbose, b'--verbose')
215 ui.setconfig(b'ui', b'verbose', options.verbose, b'--verbose')
216 ui.setconfig(b'ui', b'debug', options.debug, b'--debug')
216 ui.setconfig(b'ui', b'debug', options.debug, b'--debug')
217
217
218 if options.file:
218 if options.file:
219 if checkfile(ui, options.file, options.initlevel):
219 if checkfile(ui, options.file, options.initlevel):
220 sys.exit(1)
220 sys.exit(1)
221 else:
221 else:
222 if checkhghelps(ui):
222 if checkhghelps(ui):
223 sys.exit(1)
223 sys.exit(1)
224
224
225
225
226 if __name__ == "__main__":
226 if __name__ == "__main__":
227 main()
227 main()
@@ -1,1073 +1,1073 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import functools
9 import functools
10 import os
10 import os
11 import re
11 import re
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial import (
14 from mercurial import (
15 encoding,
15 encoding,
16 error,
16 error,
17 hook,
17 hook,
18 pycompat,
18 pycompat,
19 util,
19 util,
20 )
20 )
21 from mercurial.utils import (
21 from mercurial.utils import (
22 dateutil,
22 dateutil,
23 procutil,
23 procutil,
24 stringutil,
24 stringutil,
25 )
25 )
26
26
27 pickle = util.pickle
27 pickle = util.pickle
28
28
29
29
30 class logentry(object):
30 class logentry(object):
31 '''Class logentry has the following attributes:
31 '''Class logentry has the following attributes:
32 .author - author name as CVS knows it
32 .author - author name as CVS knows it
33 .branch - name of branch this revision is on
33 .branch - name of branch this revision is on
34 .branches - revision tuple of branches starting at this revision
34 .branches - revision tuple of branches starting at this revision
35 .comment - commit message
35 .comment - commit message
36 .commitid - CVS commitid or None
36 .commitid - CVS commitid or None
37 .date - the commit date as a (time, tz) tuple
37 .date - the commit date as a (time, tz) tuple
38 .dead - true if file revision is dead
38 .dead - true if file revision is dead
39 .file - Name of file
39 .file - Name of file
40 .lines - a tuple (+lines, -lines) or None
40 .lines - a tuple (+lines, -lines) or None
41 .parent - Previous revision of this entry
41 .parent - Previous revision of this entry
42 .rcs - name of file as returned from CVS
42 .rcs - name of file as returned from CVS
43 .revision - revision number as tuple
43 .revision - revision number as tuple
44 .tags - list of tags on the file
44 .tags - list of tags on the file
45 .synthetic - is this a synthetic "file ... added on ..." revision?
45 .synthetic - is this a synthetic "file ... added on ..." revision?
46 .mergepoint - the branch that has been merged from (if present in
46 .mergepoint - the branch that has been merged from (if present in
47 rlog output) or None
47 rlog output) or None
48 .branchpoints - the branches that start at the current entry or empty
48 .branchpoints - the branches that start at the current entry or empty
49 '''
49 '''
50
50
51 def __init__(self, **entries):
51 def __init__(self, **entries):
52 self.synthetic = False
52 self.synthetic = False
53 self.__dict__.update(entries)
53 self.__dict__.update(entries)
54
54
55 def __repr__(self):
55 def __repr__(self):
56 items = (
56 items = (
57 r"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
57 r"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
58 )
58 )
59 return r"%s(%s)" % (type(self).__name__, r", ".join(items))
59 return r"%s(%s)" % (type(self).__name__, r", ".join(items))
60
60
61
61
62 class logerror(Exception):
62 class logerror(Exception):
63 pass
63 pass
64
64
65
65
66 def getrepopath(cvspath):
66 def getrepopath(cvspath):
67 """Return the repository path from a CVS path.
67 """Return the repository path from a CVS path.
68
68
69 >>> getrepopath(b'/foo/bar')
69 >>> getrepopath(b'/foo/bar')
70 '/foo/bar'
70 '/foo/bar'
71 >>> getrepopath(b'c:/foo/bar')
71 >>> getrepopath(b'c:/foo/bar')
72 '/foo/bar'
72 '/foo/bar'
73 >>> getrepopath(b':pserver:10/foo/bar')
73 >>> getrepopath(b':pserver:10/foo/bar')
74 '/foo/bar'
74 '/foo/bar'
75 >>> getrepopath(b':pserver:10c:/foo/bar')
75 >>> getrepopath(b':pserver:10c:/foo/bar')
76 '/foo/bar'
76 '/foo/bar'
77 >>> getrepopath(b':pserver:/foo/bar')
77 >>> getrepopath(b':pserver:/foo/bar')
78 '/foo/bar'
78 '/foo/bar'
79 >>> getrepopath(b':pserver:c:/foo/bar')
79 >>> getrepopath(b':pserver:c:/foo/bar')
80 '/foo/bar'
80 '/foo/bar'
81 >>> getrepopath(b':pserver:truc@foo.bar:/foo/bar')
81 >>> getrepopath(b':pserver:truc@foo.bar:/foo/bar')
82 '/foo/bar'
82 '/foo/bar'
83 >>> getrepopath(b':pserver:truc@foo.bar:c:/foo/bar')
83 >>> getrepopath(b':pserver:truc@foo.bar:c:/foo/bar')
84 '/foo/bar'
84 '/foo/bar'
85 >>> getrepopath(b'user@server/path/to/repository')
85 >>> getrepopath(b'user@server/path/to/repository')
86 '/path/to/repository'
86 '/path/to/repository'
87 """
87 """
88 # According to the CVS manual, CVS paths are expressed like:
88 # According to the CVS manual, CVS paths are expressed like:
89 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
89 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
90 #
90 #
91 # The CVS path is split on ':'; within the last part, the position of
91 # The CVS path is split on ':'; within the last part, the position of
92 # the first '/' after any '@' is located. The repository path is the
92 # the first '/' after any '@' is located. The repository path is the
93 # rest of that part starting at, and including, that '/'.
93 # rest of that part starting at, and including, that '/'.
94
94
95 parts = cvspath.split(b':')
95 parts = cvspath.split(b':')
96 atposition = parts[-1].find(b'@')
96 atposition = parts[-1].find(b'@')
97 start = 0
97 start = 0
98
98
99 if atposition != -1:
99 if atposition != -1:
100 start = atposition
100 start = atposition
101
101
102 repopath = parts[-1][parts[-1].find(b'/', start) :]
102 repopath = parts[-1][parts[-1].find(b'/', start) :]
103 return repopath
103 return repopath
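# For example, for cvspath b':pserver:user@host:2401/cvsroot/proj' the last
# ':'-separated part is b'2401/cvsroot/proj'; it contains no '@', so the
# scan starts at offset 0 and the first '/' yields b'/cvsroot/proj'.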
104
104
105
105
106 def createlog(ui, directory=None, root=b"", rlog=True, cache=None):
106 def createlog(ui, directory=None, root=b"", rlog=True, cache=None):
107 '''Collect the CVS rlog'''
107 '''Collect the CVS rlog'''
108
108
109 # Because we store many duplicate commit log messages, reusing strings
109 # Because we store many duplicate commit log messages, reusing strings
110 # saves a lot of memory and pickle storage space.
110 # saves a lot of memory and pickle storage space.
111 _scache = {}
111 _scache = {}
112
112
113 def scache(s):
113 def scache(s):
114 b"return a shared version of a string"
114 b"return a shared version of a string"
115 return _scache.setdefault(s, s)
115 return _scache.setdefault(s, s)
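# Illustrative behaviour (hypothetical message): every call like
# scache(b'fix typo') returns the one shared bytes object, so identical
# log messages are stored only once in memory and in the pickle cache.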
116
116
117 ui.status(_(b'collecting CVS rlog\n'))
117 ui.status(_(b'collecting CVS rlog\n'))
118
118
119 log = [] # list of logentry objects containing the CVS state
119 log = [] # list of logentry objects containing the CVS state
120
120
121 # patterns to match in CVS (r)log output, by state of use
121 # patterns to match in CVS (r)log output, by state of use
122 re_00 = re.compile(b'RCS file: (.+)$')
122 re_00 = re.compile(b'RCS file: (.+)$')
123 re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
123 re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
124 re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
124 re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
125 re_03 = re.compile(
125 re_03 = re.compile(
126 b"(Cannot access.+CVSROOT)|" b"(can't create temporary directory.+)$"
126 b"(Cannot access.+CVSROOT)|" b"(can't create temporary directory.+)$"
127 )
127 )
128 re_10 = re.compile(b'Working file: (.+)$')
128 re_10 = re.compile(b'Working file: (.+)$')
129 re_20 = re.compile(b'symbolic names:')
129 re_20 = re.compile(b'symbolic names:')
130 re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
130 re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
131 re_31 = re.compile(b'----------------------------$')
131 re_31 = re.compile(b'----------------------------$')
132 re_32 = re.compile(
132 re_32 = re.compile(
133 b'======================================='
133 b'======================================='
134 b'======================================$'
134 b'======================================$'
135 )
135 )
136 re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
136 re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
137 re_60 = re.compile(
137 re_60 = re.compile(
138 br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
138 br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
139 br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
139 br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
140 br'(\s+commitid:\s+([^;]+);)?'
140 br'(\s+commitid:\s+([^;]+);)?'
141 br'(.*mergepoint:\s+([^;]+);)?'
141 br'(.*mergepoint:\s+([^;]+);)?'
142 )
142 )
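# As an illustration, re_60 is meant to match rlog date lines of the form
# date: 2010/01/02 03:04:05 +0000; author: jane; state: Exp; lines: +2 -1; commitid: AbC123;
# (field values here are made up; the lines, commitid and mergepoint
# groups are all optional).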
143 re_70 = re.compile(b'branches: (.+);$')
143 re_70 = re.compile(b'branches: (.+);$')
144
144
145 file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')
145 file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')
146
146
147 prefix = b'' # leading path to strip off what we get from CVS
147 prefix = b'' # leading path to strip off what we get from CVS
148
148
149 if directory is None:
149 if directory is None:
150 # Current working directory
150 # Current working directory
151
151
152 # Get the real directory in the repository
152 # Get the real directory in the repository
153 try:
153 try:
154 with open(os.path.join(b'CVS', b'Repository'), b'rb') as f:
154 with open(os.path.join(b'CVS', b'Repository'), b'rb') as f:
155 prefix = f.read().strip()
155 prefix = f.read().strip()
156 directory = prefix
156 directory = prefix
157 if prefix == b".":
157 if prefix == b".":
158 prefix = b""
158 prefix = b""
159 except IOError:
159 except IOError:
160 raise logerror(_(b'not a CVS sandbox'))
160 raise logerror(_(b'not a CVS sandbox'))
161
161
162 if prefix and not prefix.endswith(pycompat.ossep):
162 if prefix and not prefix.endswith(pycompat.ossep):
163 prefix += pycompat.ossep
163 prefix += pycompat.ossep
164
164
165 # Use the Root file in the sandbox, if it exists
165 # Use the Root file in the sandbox, if it exists
166 try:
166 try:
167 root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip()
167 root = open(os.path.join(b'CVS', b'Root'), b'rb').read().strip()
168 except IOError:
168 except IOError:
169 pass
169 pass
170
170
171 if not root:
171 if not root:
172 root = encoding.environ.get(b'CVSROOT', b'')
172 root = encoding.environ.get(b'CVSROOT', b'')
173
173
174 # read log cache if one exists
174 # read log cache if one exists
175 oldlog = []
175 oldlog = []
176 date = None
176 date = None
177
177
178 if cache:
178 if cache:
179 cachedir = os.path.expanduser(b'~/.hg.cvsps')
179 cachedir = os.path.expanduser(b'~/.hg.cvsps')
180 if not os.path.exists(cachedir):
180 if not os.path.exists(cachedir):
181 os.mkdir(cachedir)
181 os.mkdir(cachedir)
182
182
183 # The cvsps cache pickle needs a uniquified name, based on the
183 # The cvsps cache pickle needs a uniquified name, based on the
184 # repository location. The address may have all sorts of nasties
184 # repository location. The address may have all sorts of nasties
185 # in it, slashes, colons and such. So here we take just the
185 # in it, slashes, colons and such. So here we take just the
186 # alphanumeric characters, concatenated in a way that does not
186 # alphanumeric characters, concatenated in a way that does not
187 # mix up the various components, so that
187 # mix up the various components, so that
188 # :pserver:user@server:/path
188 # :pserver:user@server:/path
189 # and
189 # and
190 # /pserver/user/server/path
190 # /pserver/user/server/path
191 # are mapped to different cache file names.
191 # are mapped to different cache file names.
192 cachefile = root.split(b":") + [directory, b"cache"]
192 cachefile = root.split(b":") + [directory, b"cache"]
193 cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
193 cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
194 cachefile = os.path.join(
194 cachefile = os.path.join(
195 cachedir, b'.'.join([s for s in cachefile if s])
195 cachedir, b'.'.join([s for s in cachefile if s])
196 )
196 )
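# For example, root b':pserver:user@server:/path' and directory b'mod'
# produce the cache file name b'pserver.user-server.path.mod.cache'.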
197
197
198 if cache == b'update':
198 if cache == b'update':
199 try:
199 try:
200 ui.note(_(b'reading cvs log cache %s\n') % cachefile)
200 ui.note(_(b'reading cvs log cache %s\n') % cachefile)
201 oldlog = pickle.load(open(cachefile, b'rb'))
201 oldlog = pickle.load(open(cachefile, b'rb'))
202 for e in oldlog:
202 for e in oldlog:
203 if not (
203 if not (
204 util.safehasattr(e, b'branchpoints')
204 util.safehasattr(e, b'branchpoints')
205 and util.safehasattr(e, b'commitid')
205 and util.safehasattr(e, b'commitid')
206 and util.safehasattr(e, b'mergepoint')
206 and util.safehasattr(e, b'mergepoint')
207 ):
207 ):
208 ui.status(_(b'ignoring old cache\n'))
208 ui.status(_(b'ignoring old cache\n'))
209 oldlog = []
209 oldlog = []
210 break
210 break
211
211
212 ui.note(_(b'cache has %d log entries\n') % len(oldlog))
212 ui.note(_(b'cache has %d log entries\n') % len(oldlog))
213 except Exception as e:
213 except Exception as e:
214 ui.note(_(b'error reading cache: %r\n') % e)
214 ui.note(_(b'error reading cache: %r\n') % e)
215
215
216 if oldlog:
216 if oldlog:
217 date = oldlog[-1].date # last commit date as a (time,tz) tuple
217 date = oldlog[-1].date # last commit date as a (time,tz) tuple
218 date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2')
218 date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2')
219
219
220 # build the CVS commandline
220 # build the CVS commandline
221 cmd = [b'cvs', b'-q']
221 cmd = [b'cvs', b'-q']
222 if root:
222 if root:
223 cmd.append(b'-d%s' % root)
223 cmd.append(b'-d%s' % root)
224 p = util.normpath(getrepopath(root))
224 p = util.normpath(getrepopath(root))
225 if not p.endswith(b'/'):
225 if not p.endswith(b'/'):
226 p += b'/'
226 p += b'/'
227 if prefix:
227 if prefix:
228 # looks like normpath replaces "" by "."
228 # looks like normpath replaces "" by "."
229 prefix = p + util.normpath(prefix)
229 prefix = p + util.normpath(prefix)
230 else:
230 else:
231 prefix = p
231 prefix = p
232 cmd.append([b'log', b'rlog'][rlog])
232 cmd.append([b'log', b'rlog'][rlog])
233 if date:
233 if date:
234 # no space between option and date string
234 # no space between option and date string
235 cmd.append(b'-d>%s' % date)
235 cmd.append(b'-d>%s' % date)
236 cmd.append(directory)
236 cmd.append(directory)
237
237
238 # state machine begins here
238 # state machine begins here
239 tags = {} # dictionary of revisions on current file with their tags
239 tags = {} # dictionary of revisions on current file with their tags
240 branchmap = {} # mapping between branch names and revision numbers
240 branchmap = {} # mapping between branch names and revision numbers
241 rcsmap = {}
241 rcsmap = {}
242 state = 0
242 state = 0
243 store = False # set when a new record can be appended
243 store = False # set when a new record can be appended
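# The states used below are, roughly: 0 awaiting 'RCS file', 1 awaiting
# 'Working file', 2 awaiting 'symbolic names', 3 reading tags, 4 awaiting
# the first '-----' separator, 5 the revision number line, 6 the
# date/author line, 7 and 8 branches and the commit log message.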
244
244
245 cmd = [procutil.shellquote(arg) for arg in cmd]
245 cmd = [procutil.shellquote(arg) for arg in cmd]
246 ui.note(_(b"running %s\n") % (b' '.join(cmd)))
246 ui.note(_(b"running %s\n") % (b' '.join(cmd)))
247 ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
247 ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
248
248
249 pfp = procutil.popen(b' '.join(cmd), b'rb')
249 pfp = procutil.popen(b' '.join(cmd), b'rb')
250 peek = util.fromnativeeol(pfp.readline())
250 peek = util.fromnativeeol(pfp.readline())
251 while True:
251 while True:
252 line = peek
252 line = peek
253 if line == b'':
253 if line == b'':
254 break
254 break
255 peek = util.fromnativeeol(pfp.readline())
255 peek = util.fromnativeeol(pfp.readline())
256 if line.endswith(b'\n'):
256 if line.endswith(b'\n'):
257 line = line[:-1]
257 line = line[:-1]
258 # ui.debug('state=%d line=%r\n' % (state, line))
258 # ui.debug('state=%d line=%r\n' % (state, line))
259
259
260 if state == 0:
260 if state == 0:
261 # initial state, consume input until we see 'RCS file'
261 # initial state, consume input until we see 'RCS file'
262 match = re_00.match(line)
262 match = re_00.match(line)
263 if match:
263 if match:
264 rcs = match.group(1)
264 rcs = match.group(1)
265 tags = {}
265 tags = {}
266 if rlog:
266 if rlog:
267 filename = util.normpath(rcs[:-2])
267 filename = util.normpath(rcs[:-2])
268 if filename.startswith(prefix):
268 if filename.startswith(prefix):
269 filename = filename[len(prefix) :]
269 filename = filename[len(prefix) :]
270 if filename.startswith(b'/'):
270 if filename.startswith(b'/'):
271 filename = filename[1:]
271 filename = filename[1:]
272 if filename.startswith(b'Attic/'):
272 if filename.startswith(b'Attic/'):
273 filename = filename[6:]
273 filename = filename[6:]
274 else:
274 else:
275 filename = filename.replace(b'/Attic/', b'/')
275 filename = filename.replace(b'/Attic/', b'/')
276 state = 2
276 state = 2
277 continue
277 continue
278 state = 1
278 state = 1
279 continue
279 continue
280 match = re_01.match(line)
280 match = re_01.match(line)
281 if match:
281 if match:
282 raise logerror(match.group(1))
282 raise logerror(match.group(1))
283 match = re_02.match(line)
283 match = re_02.match(line)
284 if match:
284 if match:
285 raise logerror(match.group(2))
285 raise logerror(match.group(2))
286 if re_03.match(line):
286 if re_03.match(line):
287 raise logerror(line)
287 raise logerror(line)
288
288
289 elif state == 1:
289 elif state == 1:
290 # expect 'Working file' (only when using log instead of rlog)
290 # expect 'Working file' (only when using log instead of rlog)
291 match = re_10.match(line)
291 match = re_10.match(line)
292 assert match, _(b'RCS file must be followed by working file')
292 assert match, _(b'RCS file must be followed by working file')
293 filename = util.normpath(match.group(1))
293 filename = util.normpath(match.group(1))
294 state = 2
294 state = 2
295
295
296 elif state == 2:
296 elif state == 2:
297 # expect 'symbolic names'
297 # expect 'symbolic names'
298 if re_20.match(line):
298 if re_20.match(line):
299 branchmap = {}
299 branchmap = {}
300 state = 3
300 state = 3
301
301
302 elif state == 3:
302 elif state == 3:
303 # read the symbolic names and store as tags
303 # read the symbolic names and store as tags
304 match = re_30.match(line)
304 match = re_30.match(line)
305 if match:
305 if match:
306 rev = [int(x) for x in match.group(2).split(b'.')]
306 rev = [int(x) for x in match.group(2).split(b'.')]
307
307
308 # Convert magic branch number to an odd-numbered one
308 # Convert magic branch number to an odd-numbered one
309 revn = len(rev)
309 revn = len(rev)
310 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
310 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
311 rev = rev[:-2] + rev[-1:]
311 rev = rev[:-2] + rev[-1:]
312 rev = tuple(rev)
312 rev = tuple(rev)
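# e.g. the magic branch number (1, 2, 0, 4), which names branch 1.2.4,
# becomes (1, 2, 4) here.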
313
313
314 if rev not in tags:
314 if rev not in tags:
315 tags[rev] = []
315 tags[rev] = []
316 tags[rev].append(match.group(1))
316 tags[rev].append(match.group(1))
317 branchmap[match.group(1)] = match.group(2)
317 branchmap[match.group(1)] = match.group(2)
318
318
319 elif re_31.match(line):
319 elif re_31.match(line):
320 state = 5
320 state = 5
321 elif re_32.match(line):
321 elif re_32.match(line):
322 state = 0
322 state = 0
323
323
324 elif state == 4:
324 elif state == 4:
325 # expecting '------' separator before first revision
325 # expecting '------' separator before first revision
326 if re_31.match(line):
326 if re_31.match(line):
327 state = 5
327 state = 5
328 else:
328 else:
329 assert not re_32.match(line), _(
329 assert not re_32.match(line), _(
330 b'must have at least ' b'some revisions'
330 b'must have at least ' b'some revisions'
331 )
331 )
332
332
333 elif state == 5:
333 elif state == 5:
334 # expecting revision number and possibly (ignored) lock indication
334 # expecting revision number and possibly (ignored) lock indication
335 # we create the logentry here from values stored in states 0 to 4,
335 # we create the logentry here from values stored in states 0 to 4,
336 # as this state is re-entered for subsequent revisions of a file.
336 # as this state is re-entered for subsequent revisions of a file.
337 match = re_50.match(line)
337 match = re_50.match(line)
338 assert match, _(b'expected revision number')
338 assert match, _(b'expected revision number')
339 e = logentry(
339 e = logentry(
340 rcs=scache(rcs),
340 rcs=scache(rcs),
341 file=scache(filename),
341 file=scache(filename),
342 revision=tuple([int(x) for x in match.group(1).split(b'.')]),
342 revision=tuple([int(x) for x in match.group(1).split(b'.')]),
343 branches=[],
343 branches=[],
344 parent=None,
344 parent=None,
345 commitid=None,
345 commitid=None,
346 mergepoint=None,
346 mergepoint=None,
347 branchpoints=set(),
347 branchpoints=set(),
348 )
348 )
349
349
350 state = 6
350 state = 6
351
351
352 elif state == 6:
352 elif state == 6:
353 # expecting date, author, state, lines changed
353 # expecting date, author, state, lines changed
354 match = re_60.match(line)
354 match = re_60.match(line)
355 assert match, _(b'revision must be followed by date line')
355 assert match, _(b'revision must be followed by date line')
356 d = match.group(1)
356 d = match.group(1)
357 if d[2:3] == b'/':
357 if d[2:3] == b'/':
358 # Y2K
358 # Y2K
359 d = b'19' + d
359 d = b'19' + d
360
360
361 if len(d.split()) != 3:
361 if len(d.split()) != 3:
362 # cvs log dates always in GMT
362 # cvs log dates always in GMT
363 d = d + b' UTC'
363 d = d + b' UTC'
364 e.date = dateutil.parsedate(
364 e.date = dateutil.parsedate(
365 d,
365 d,
366 [
366 [
367 b'%y/%m/%d %H:%M:%S',
367 b'%y/%m/%d %H:%M:%S',
368 b'%Y/%m/%d %H:%M:%S',
368 b'%Y/%m/%d %H:%M:%S',
369 b'%Y-%m-%d %H:%M:%S',
369 b'%Y-%m-%d %H:%M:%S',
370 ],
370 ],
371 )
371 )
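# e.g. a two-digit date like b'99/05/10 12:00:00' is expanded to
# b'1999/05/10 12:00:00' above and, having no timezone field, is parsed
# as b'1999/05/10 12:00:00 UTC'.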
372 e.author = scache(match.group(2))
372 e.author = scache(match.group(2))
373 e.dead = match.group(3).lower() == b'dead'
373 e.dead = match.group(3).lower() == b'dead'
374
374
375 if match.group(5):
375 if match.group(5):
376 if match.group(6):
376 if match.group(6):
377 e.lines = (int(match.group(5)), int(match.group(6)))
377 e.lines = (int(match.group(5)), int(match.group(6)))
378 else:
378 else:
379 e.lines = (int(match.group(5)), 0)
379 e.lines = (int(match.group(5)), 0)
380 elif match.group(6):
380 elif match.group(6):
381 e.lines = (0, int(match.group(6)))
381 e.lines = (0, int(match.group(6)))
382 else:
382 else:
383 e.lines = None
383 e.lines = None
384
384
385 if match.group(7): # cvs 1.12 commitid
385 if match.group(7): # cvs 1.12 commitid
386 e.commitid = match.group(8)
386 e.commitid = match.group(8)
387
387
388 if match.group(9): # cvsnt mergepoint
388 if match.group(9): # cvsnt mergepoint
389 myrev = match.group(10).split(b'.')
389 myrev = match.group(10).split(b'.')
390 if len(myrev) == 2: # head
390 if len(myrev) == 2: # head
391 e.mergepoint = b'HEAD'
391 e.mergepoint = b'HEAD'
392 else:
392 else:
393 myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]])
393 myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]])
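# e.g. a mergepoint revision of 1.2.2.1 is mapped back to the magic
# branch number 1.2.0.2 before being looked up in branchmap below.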
394 branches = [b for b in branchmap if branchmap[b] == myrev]
394 branches = [b for b in branchmap if branchmap[b] == myrev]
395 assert len(branches) == 1, (
395 assert len(branches) == 1, (
396 b'unknown branch: %s' % myrev
396 b'unknown branch: %s' % myrev
397 )
397 )
398 e.mergepoint = branches[0]
398 e.mergepoint = branches[0]
399
399
400 e.comment = []
400 e.comment = []
401 state = 7
401 state = 7
402
402
403 elif state == 7:
403 elif state == 7:
404 # read the revision numbers of branches that start at this revision
404 # read the revision numbers of branches that start at this revision
405 # or store the commit log message otherwise
405 # or store the commit log message otherwise
406 m = re_70.match(line)
406 m = re_70.match(line)
407 if m:
407 if m:
408 e.branches = [
408 e.branches = [
409 tuple([int(y) for y in x.strip().split(b'.')])
409 tuple([int(y) for y in x.strip().split(b'.')])
410 for x in m.group(1).split(b';')
410 for x in m.group(1).split(b';')
411 ]
411 ]
412 state = 8
412 state = 8
413 elif re_31.match(line) and re_50.match(peek):
413 elif re_31.match(line) and re_50.match(peek):
414 state = 5
414 state = 5
415 store = True
415 store = True
416 elif re_32.match(line):
416 elif re_32.match(line):
417 state = 0
417 state = 0
418 store = True
418 store = True
419 else:
419 else:
420 e.comment.append(line)
420 e.comment.append(line)
421
421
422 elif state == 8:
422 elif state == 8:
423 # store commit log message
423 # store commit log message
424 if re_31.match(line):
424 if re_31.match(line):
425 cpeek = peek
425 cpeek = peek
426 if cpeek.endswith(b'\n'):
426 if cpeek.endswith(b'\n'):
427 cpeek = cpeek[:-1]
427 cpeek = cpeek[:-1]
428 if re_50.match(cpeek):
428 if re_50.match(cpeek):
429 state = 5
429 state = 5
430 store = True
430 store = True
431 else:
431 else:
432 e.comment.append(line)
432 e.comment.append(line)
433 elif re_32.match(line):
433 elif re_32.match(line):
434 state = 0
434 state = 0
435 store = True
435 store = True
436 else:
436 else:
437 e.comment.append(line)
437 e.comment.append(line)
438
438
439 # When a file is added on a branch B1, CVS creates a synthetic
439 # When a file is added on a branch B1, CVS creates a synthetic
440 # dead trunk revision 1.1 so that the branch has a root.
440 # dead trunk revision 1.1 so that the branch has a root.
441 # Likewise, if you merge such a file to a later branch B2 (one
441 # Likewise, if you merge such a file to a later branch B2 (one
442 # that already existed when the file was added on B1), CVS
442 # that already existed when the file was added on B1), CVS
443 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
443 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
444 # these revisions now, but mark them synthetic so
444 # these revisions now, but mark them synthetic so
445 # createchangeset() can take care of them.
445 # createchangeset() can take care of them.
446 if (
446 if (
447 store
447 store
448 and e.dead
448 and e.dead
449 and e.revision[-1] == 1 # 1.1 or 1.1.x.1
449 and e.revision[-1] == 1 # 1.1 or 1.1.x.1
450 and len(e.comment) == 1
450 and len(e.comment) == 1
451 and file_added_re.match(e.comment[0])
451 and file_added_re.match(e.comment[0])
452 ):
452 ):
453 ui.debug(
453 ui.debug(
454 b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])
454 b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])
455 )
455 )
456 e.synthetic = True
456 e.synthetic = True
457
457
458 if store:
458 if store:
459 # clean up the results and save in the log.
459 # clean up the results and save in the log.
460 store = False
460 store = False
461 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
461 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
462 e.comment = scache(b'\n'.join(e.comment))
462 e.comment = scache(b'\n'.join(e.comment))
463
463
464 revn = len(e.revision)
464 revn = len(e.revision)
465 if revn > 3 and (revn % 2) == 0:
465 if revn > 3 and (revn % 2) == 0:
466 e.branch = tags.get(e.revision[:-1], [None])[0]
466 e.branch = tags.get(e.revision[:-1], [None])[0]
467 else:
467 else:
468 e.branch = None
468 e.branch = None
469
469
470 # find the branches starting from this revision
470 # find the branches starting from this revision
471 branchpoints = set()
471 branchpoints = set()
472 for branch, revision in branchmap.items():
472 for branch, revision in branchmap.items():
473 revparts = tuple([int(i) for i in revision.split(b'.')])
473 revparts = tuple([int(i) for i in revision.split(b'.')])
474 if len(revparts) < 2: # bad tags
474 if len(revparts) < 2: # bad tags
475 continue
475 continue
476 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
476 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
477 # normal branch
477 # normal branch
478 if revparts[:-2] == e.revision:
478 if revparts[:-2] == e.revision:
479 branchpoints.add(branch)
479 branchpoints.add(branch)
480 elif revparts == (1, 1, 1): # vendor branch
480 elif revparts == (1, 1, 1): # vendor branch
481 if revparts in e.branches:
481 if revparts in e.branches:
482 branchpoints.add(branch)
482 branchpoints.add(branch)
483 e.branchpoints = branchpoints
483 e.branchpoints = branchpoints
484
484
485 log.append(e)
485 log.append(e)
486
486
487 rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs
487 rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs
488
488
489 if len(log) % 100 == 0:
489 if len(log) % 100 == 0:
490 ui.status(
490 ui.status(
491 stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80)
491 stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80)
492 + b'\n'
492 + b'\n'
493 )
493 )
494
494
495 log.sort(key=lambda x: (x.rcs, x.revision))
495 log.sort(key=lambda x: (x.rcs, x.revision))
496
496
497 # find parent revisions of individual files
497 # find parent revisions of individual files
498 versions = {}
498 versions = {}
499 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
499 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
500 rcs = e.rcs.replace(b'/Attic/', b'/')
500 rcs = e.rcs.replace(b'/Attic/', b'/')
501 if rcs in rcsmap:
501 if rcs in rcsmap:
502 e.rcs = rcsmap[rcs]
502 e.rcs = rcsmap[rcs]
503 branch = e.revision[:-1]
503 branch = e.revision[:-1]
504 versions[(e.rcs, branch)] = e.revision
504 versions[(e.rcs, branch)] = e.revision
505
505
506 for e in log:
506 for e in log:
507 branch = e.revision[:-1]
507 branch = e.revision[:-1]
508 p = versions.get((e.rcs, branch), None)
508 p = versions.get((e.rcs, branch), None)
509 if p is None:
509 if p is None:
510 p = e.revision[:-2]
510 p = e.revision[:-2]
511 e.parent = p
511 e.parent = p
512 versions[(e.rcs, branch)] = e.revision
512 versions[(e.rcs, branch)] = e.revision
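# e.g. for revision (1, 2, 2, 3) with no earlier revision recorded on
# branch (1, 2, 2), the parent falls back to (1, 2), the revision the
# branch forked from.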
513
513
514 # update the log cache
514 # update the log cache
515 if cache:
515 if cache:
516 if log:
516 if log:
517 # join up the old and new logs
517 # join up the old and new logs
518 log.sort(key=lambda x: x.date)
518 log.sort(key=lambda x: x.date)
519
519
520 if oldlog and oldlog[-1].date >= log[0].date:
520 if oldlog and oldlog[-1].date >= log[0].date:
521 raise logerror(
521 raise logerror(
522 _(
522 _(
523 b'log cache overlaps with new log entries,'
523 b'log cache overlaps with new log entries,'
524 b' re-run without cache.'
524 b' re-run without cache.'
525 )
525 )
526 )
526 )
527
527
528 log = oldlog + log
528 log = oldlog + log
529
529
530 # write the new cachefile
530 # write the new cachefile
531 ui.note(_(b'writing cvs log cache %s\n') % cachefile)
531 ui.note(_(b'writing cvs log cache %s\n') % cachefile)
532 pickle.dump(log, open(cachefile, b'wb'))
532 pickle.dump(log, open(cachefile, b'wb'))
533 else:
533 else:
534 log = oldlog
534 log = oldlog
535
535
536 ui.status(_(b'%d log entries\n') % len(log))
536 ui.status(_(b'%d log entries\n') % len(log))
537
537
538 encodings = ui.configlist(b'convert', b'cvsps.logencoding')
538 encodings = ui.configlist(b'convert', b'cvsps.logencoding')
539 if encodings:
539 if encodings:
540
540
541 def revstr(r):
541 def revstr(r):
542 # this is needed, because logentry.revision is a tuple of "int"
542 # this is needed, because logentry.revision is a tuple of "int"
543 # (e.g. (1, 2) for "1.2")
543 # (e.g. (1, 2) for "1.2")
544 return b'.'.join(pycompat.maplist(pycompat.bytestr, r))
544 return b'.'.join(pycompat.maplist(pycompat.bytestr, r))
545
545
546 for entry in log:
546 for entry in log:
547 comment = entry.comment
547 comment = entry.comment
548 for e in encodings:
548 for e in encodings:
549 try:
549 try:
550 entry.comment = comment.decode(pycompat.sysstr(e)).encode(
550 entry.comment = comment.decode(pycompat.sysstr(e)).encode(
551 'utf-8'
551 'utf-8'
552 )
552 )
553 if ui.debugflag:
553 if ui.debugflag:
554 ui.debug(
554 ui.debug(
555 b"transcoding by %s: %s of %s\n"
555 b"transcoding by %s: %s of %s\n"
556 % (e, revstr(entry.revision), entry.file)
556 % (e, revstr(entry.revision), entry.file)
557 )
557 )
558 break
558 break
559 except UnicodeDecodeError:
559 except UnicodeDecodeError:
560 pass # try next encoding
560 pass # try next encoding
561 except LookupError as inst: # unknown encoding, maybe
561 except LookupError as inst: # unknown encoding, maybe
562 raise error.Abort(
562 raise error.Abort(
563 inst,
563 inst,
564 hint=_(
564 hint=_(
565 b'check convert.cvsps.logencoding' b' configuration'
565 b'check convert.cvsps.logencoding' b' configuration'
566 ),
566 ),
567 )
567 )
568 else:
568 else:
569 raise error.Abort(
569 raise error.Abort(
570 _(
570 _(
571 b"no encoding can transcode"
571 b"no encoding can transcode"
572 b" CVS log message for %s of %s"
572 b" CVS log message for %s of %s"
573 )
573 )
574 % (revstr(entry.revision), entry.file),
574 % (revstr(entry.revision), entry.file),
575 hint=_(
575 hint=_(
576 b'check convert.cvsps.logencoding' b' configuration'
576 b'check convert.cvsps.logencoding' b' configuration'
577 ),
577 ),
578 )
578 )
579
579
580 hook.hook(ui, None, b"cvslog", True, log=log)
580 hook.hook(ui, None, b"cvslog", True, log=log)
581
581
582 return log
582 return log
583
583
584
584
585 class changeset(object):
585 class changeset(object):
586 '''Class changeset has the following attributes:
586 '''Class changeset has the following attributes:
587 .id - integer identifying this changeset (list index)
587 .id - integer identifying this changeset (list index)
588 .author - author name as CVS knows it
588 .author - author name as CVS knows it
589 .branch - name of branch this changeset is on, or None
589 .branch - name of branch this changeset is on, or None
590 .comment - commit message
590 .comment - commit message
591 .commitid - CVS commitid or None
591 .commitid - CVS commitid or None
592 .date - the commit date as a (time,tz) tuple
592 .date - the commit date as a (time,tz) tuple
593 .entries - list of logentry objects in this changeset
593 .entries - list of logentry objects in this changeset
594 .parents - list of one or two parent changesets
594 .parents - list of one or two parent changesets
595 .tags - list of tags on this changeset
595 .tags - list of tags on this changeset
596 .synthetic - from synthetic revision "file ... added on branch ..."
596 .synthetic - from synthetic revision "file ... added on branch ..."
597 .mergepoint - the branch that has been merged from or None
597 .mergepoint - the branch that has been merged from or None
598 .branchpoints - the branches that start at the current entry or empty
598 .branchpoints - the branches that start at the current entry or empty
599 '''
599 '''
600
600
601 def __init__(self, **entries):
601 def __init__(self, **entries):
602 self.id = None
602 self.id = None
603 self.synthetic = False
603 self.synthetic = False
604 self.__dict__.update(entries)
604 self.__dict__.update(entries)
605
605
606 def __repr__(self):
606 def __repr__(self):
607 items = (
607 items = (
608 b"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
608 b"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
609 )
609 )
610 return b"%s(%s)" % (type(self).__name__, b", ".join(items))
610 return b"%s(%s)" % (type(self).__name__, b", ".join(items))
611
611
612
612
613 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
613 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
614 '''Convert log into changesets.'''
614 '''Convert log into changesets.'''
615
615
616 ui.status(_(b'creating changesets\n'))
616 ui.status(_(b'creating changesets\n'))
617
617
618 # try to order commitids by date
618 # try to order commitids by date
619 mindate = {}
619 mindate = {}
620 for e in log:
620 for e in log:
621 if e.commitid:
621 if e.commitid:
622 if e.commitid not in mindate:
622 if e.commitid not in mindate:
623 mindate[e.commitid] = e.date
623 mindate[e.commitid] = e.date
624 else:
624 else:
625 mindate[e.commitid] = min(e.date, mindate[e.commitid])
625 mindate[e.commitid] = min(e.date, mindate[e.commitid])
626
626
627 # Merge changesets
627 # Merge changesets
628 log.sort(
628 log.sort(
629 key=lambda x: (
629 key=lambda x: (
630 mindate.get(x.commitid, (-1, 0)),
630 mindate.get(x.commitid, (-1, 0)),
631 x.commitid or b'',
631 x.commitid or b'',
632 x.comment,
632 x.comment,
633 x.author,
633 x.author,
634 x.branch or b'',
634 x.branch or b'',
635 x.date,
635 x.date,
636 x.branchpoints,
636 x.branchpoints,
637 )
637 )
638 )
638 )
639
639
640 changesets = []
640 changesets = []
641 files = set()
641 files = set()
642 c = None
642 c = None
643 for i, e in enumerate(log):
643 for i, e in enumerate(log):
644
644
645 # Check if log entry belongs to the current changeset or not.
645 # Check if log entry belongs to the current changeset or not.
646
646
647 # Since CVS is file-centric, two different file revisions with
647 # Since CVS is file-centric, two different file revisions with
648 # different branchpoints should be treated as belonging to two
648 # different branchpoints should be treated as belonging to two
649 # different changesets (and the ordering is important and not
649 # different changesets (and the ordering is important and not
650 # honoured by cvsps at this point).
650 # honoured by cvsps at this point).
651 #
651 #
652 # Consider the following case:
652 # Consider the following case:
653 # foo 1.1 branchpoints: [MYBRANCH]
653 # foo 1.1 branchpoints: [MYBRANCH]
654 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
654 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
655 #
655 #
656 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
656 # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
657 # later version of foo may be in MYBRANCH2, so foo should be the
657 # later version of foo may be in MYBRANCH2, so foo should be the
658 # first changeset and bar the next and MYBRANCH and MYBRANCH2
658 # first changeset and bar the next and MYBRANCH and MYBRANCH2
659 # should both start off of the bar changeset. No provisions are
659 # should both start off of the bar changeset. No provisions are
660 # made to ensure that this is, in fact, what happens.
660 # made to ensure that this is, in fact, what happens.
661 if not (
661 if not (
662 c
662 c
663 and e.branchpoints == c.branchpoints
663 and e.branchpoints == c.branchpoints
664 and ( # cvs commitids
664 and ( # cvs commitids
665 (e.commitid is not None and e.commitid == c.commitid)
665 (e.commitid is not None and e.commitid == c.commitid)
666 or ( # no commitids, use fuzzy commit detection
666 or ( # no commitids, use fuzzy commit detection
667 (e.commitid is None or c.commitid is None)
667 (e.commitid is None or c.commitid is None)
668 and e.comment == c.comment
668 and e.comment == c.comment
669 and e.author == c.author
669 and e.author == c.author
670 and e.branch == c.branch
670 and e.branch == c.branch
671 and (
671 and (
672 (c.date[0] + c.date[1])
672 (c.date[0] + c.date[1])
673 <= (e.date[0] + e.date[1])
673 <= (e.date[0] + e.date[1])
674 <= (c.date[0] + c.date[1]) + fuzz
674 <= (c.date[0] + c.date[1]) + fuzz
675 )
675 )
676 and e.file not in files
676 and e.file not in files
677 )
677 )
678 )
678 )
679 ):
679 ):
680 c = changeset(
680 c = changeset(
681 comment=e.comment,
681 comment=e.comment,
682 author=e.author,
682 author=e.author,
683 branch=e.branch,
683 branch=e.branch,
684 date=e.date,
684 date=e.date,
685 entries=[],
685 entries=[],
686 mergepoint=e.mergepoint,
686 mergepoint=e.mergepoint,
687 branchpoints=e.branchpoints,
687 branchpoints=e.branchpoints,
688 commitid=e.commitid,
688 commitid=e.commitid,
689 )
689 )
690 changesets.append(c)
690 changesets.append(c)
691
691
692 files = set()
692 files = set()
693 if len(changesets) % 100 == 0:
693 if len(changesets) % 100 == 0:
694 t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1])
694 t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1])
695 ui.status(stringutil.ellipsis(t, 80) + b'\n')
695 ui.status(stringutil.ellipsis(t, 80) + b'\n')
696
696
697 c.entries.append(e)
697 c.entries.append(e)
698 files.add(e.file)
698 files.add(e.file)
699 c.date = e.date # changeset date is date of latest commit in it
699 c.date = e.date # changeset date is date of latest commit in it
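# Put differently: without matching commitids, an entry joins the current
# changeset only if comment, author and branch all agree, its file is not
# already in the changeset, and its commit time falls within fuzz seconds
# after the changeset's current date.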
700
700
701 # Mark synthetic changesets
701 # Mark synthetic changesets
702
702
703 for c in changesets:
703 for c in changesets:
704 # Synthetic revisions always get their own changeset, because
704 # Synthetic revisions always get their own changeset, because
705 # the log message includes the filename. E.g. if you add file3
705 # the log message includes the filename. E.g. if you add file3
706 # and file4 on a branch, you get four log entries and three
706 # and file4 on a branch, you get four log entries and three
707 # changesets:
707 # changesets:
708 # "File file3 was added on branch ..." (synthetic, 1 entry)
708 # "File file3 was added on branch ..." (synthetic, 1 entry)
709 # "File file4 was added on branch ..." (synthetic, 1 entry)
709 # "File file4 was added on branch ..." (synthetic, 1 entry)
710 # "Add file3 and file4 to fix ..." (real, 2 entries)
710 # "Add file3 and file4 to fix ..." (real, 2 entries)
711 # Hence the check for 1 entry here.
711 # Hence the check for 1 entry here.
712 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
712 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
713
713
714 # Sort files in each changeset
714 # Sort files in each changeset
715
715
716 def entitycompare(l, r):
716 def entitycompare(l, r):
717 b'Mimic cvsps sorting order'
717 b'Mimic cvsps sorting order'
718 l = l.file.split(b'/')
718 l = l.file.split(b'/')
719 r = r.file.split(b'/')
719 r = r.file.split(b'/')
720 nl = len(l)
720 nl = len(l)
721 nr = len(r)
721 nr = len(r)
722 n = min(nl, nr)
722 n = min(nl, nr)
723 for i in range(n):
723 for i in range(n):
724 if i + 1 == nl and nl < nr:
724 if i + 1 == nl and nl < nr:
725 return -1
725 return -1
726 elif i + 1 == nr and nl > nr:
726 elif i + 1 == nr and nl > nr:
727 return +1
727 return +1
728 elif l[i] < r[i]:
728 elif l[i] < r[i]:
729 return -1
729 return -1
730 elif l[i] > r[i]:
730 elif l[i] > r[i]:
731 return +1
731 return +1
732 return 0
732 return 0
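# Note this is not a plain lexicographic sort: once the leading components
# tie, the shorter path wins, so e.g. b'foo/zzz' sorts before
# b'foo/bar/baz' even though b'zzz' compares greater than b'bar'.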
733
733
734 for c in changesets:
734 for c in changesets:
735 c.entries.sort(key=functools.cmp_to_key(entitycompare))
735 c.entries.sort(key=functools.cmp_to_key(entitycompare))
736
736
737 # Sort changesets by date
737 # Sort changesets by date
738
738
739 odd = set()
739 odd = set()
740
740
741 def cscmp(l, r):
741 def cscmp(l, r):
742 d = sum(l.date) - sum(r.date)
742 d = sum(l.date) - sum(r.date)
743 if d:
743 if d:
744 return d
744 return d
745
745
746 # detect vendor branches and initial commits on a branch
746 # detect vendor branches and initial commits on a branch
747 le = {}
747 le = {}
748 for e in l.entries:
748 for e in l.entries:
749 le[e.rcs] = e.revision
749 le[e.rcs] = e.revision
750 re = {}
750 re = {}
751 for e in r.entries:
751 for e in r.entries:
752 re[e.rcs] = e.revision
752 re[e.rcs] = e.revision
753
753
754 d = 0
754 d = 0
755 for e in l.entries:
755 for e in l.entries:
756 if re.get(e.rcs, None) == e.parent:
756 if re.get(e.rcs, None) == e.parent:
757 assert not d
757 assert not d
758 d = 1
758 d = 1
759 break
759 break
760
760
761 for e in r.entries:
761 for e in r.entries:
762 if le.get(e.rcs, None) == e.parent:
762 if le.get(e.rcs, None) == e.parent:
763 if d:
763 if d:
764 odd.add((l, r))
764 odd.add((l, r))
765 d = -1
765 d = -1
766 break
766 break
767 # By this point, the changesets are sufficiently compared that
767 # By this point, the changesets are sufficiently compared that
768 # we don't really care about ordering. However, this leaves
768 # we don't really care about ordering. However, this leaves
769 # some race conditions in the tests, so we compare on the
769 # some race conditions in the tests, so we compare on the
770 # number of files modified, the files contained in each
770 # number of files modified, the files contained in each
771 # changeset, and the branchpoints in the change to ensure test
771 # changeset, and the branchpoints in the change to ensure test
772 # output remains stable.
772 # output remains stable.
773
773
774 # recommended replacement for cmp from
774 # recommended replacement for cmp from
775 # https://docs.python.org/3.0/whatsnew/3.0.html
775 # https://docs.python.org/3.0/whatsnew/3.0.html
776 c = lambda x, y: (x > y) - (x < y)
776 c = lambda x, y: (x > y) - (x < y)
777 # Sort bigger changes first.
777 # Sort bigger changes first.
778 if not d:
778 if not d:
779 d = c(len(l.entries), len(r.entries))
779 d = c(len(l.entries), len(r.entries))
780 # Try sorting by filename in the change.
780 # Try sorting by filename in the change.
781 if not d:
781 if not d:
782 d = c([e.file for e in l.entries], [e.file for e in r.entries])
782 d = c([e.file for e in l.entries], [e.file for e in r.entries])
783 # Try and put changes without a branch point before ones with
783 # Try and put changes without a branch point before ones with
784 # a branch point.
784 # a branch point.
785 if not d:
785 if not d:
786 d = c(len(l.branchpoints), len(r.branchpoints))
786 d = c(len(l.branchpoints), len(r.branchpoints))
787 return d
787 return d
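# In short: cscmp orders by date first, then places a changeset holding a
# parent revision before the one holding its child, and finally falls back
# to entry count, file names and branchpoint count purely for stable output.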
788
788
789 changesets.sort(key=functools.cmp_to_key(cscmp))
789 changesets.sort(key=functools.cmp_to_key(cscmp))
790
790
791 # Collect tags
791 # Collect tags
792
792
793 globaltags = {}
793 globaltags = {}
794 for c in changesets:
794 for c in changesets:
795 for e in c.entries:
795 for e in c.entries:
796 for tag in e.tags:
796 for tag in e.tags:
797 # remember which is the latest changeset to have this tag
797 # remember which is the latest changeset to have this tag
798 globaltags[tag] = c
798 globaltags[tag] = c
799
799
800 for c in changesets:
800 for c in changesets:
801 tags = set()
801 tags = set()
802 for e in c.entries:
802 for e in c.entries:
803 tags.update(e.tags)
803 tags.update(e.tags)
804 # remember tags only if this is the latest changeset to have it
804 # remember tags only if this is the latest changeset to have it
805 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
805 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
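# e.g. if a hypothetical tag RELEASE_1 appears on entries of changesets 3
# and 7, only changeset 7 keeps it, so each tag lands on the newest
# changeset carrying it.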
806
806
807 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
807 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
808 # by inserting dummy changesets with two parents, and handle
808 # by inserting dummy changesets with two parents, and handle
809 # {{mergefrombranch BRANCHNAME}} by setting two parents.
809 # {{mergefrombranch BRANCHNAME}} by setting two parents.
810
810
811 if mergeto is None:
811 if mergeto is None:
812 mergeto = br'{{mergetobranch ([-\w]+)}}'
812 mergeto = br'{{mergetobranch ([-\w]+)}}'
813 if mergeto:
813 if mergeto:
814 mergeto = re.compile(mergeto)
814 mergeto = re.compile(mergeto)
815
815
816 if mergefrom is None:
816 if mergefrom is None:
817 mergefrom = br'{{mergefrombranch ([-\w]+)}}'
817 mergefrom = br'{{mergefrombranch ([-\w]+)}}'
818 if mergefrom:
818 if mergefrom:
819 mergefrom = re.compile(mergefrom)
819 mergefrom = re.compile(mergefrom)
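# e.g. a commit message containing b'{{mergetobranch MYBRANCH}}' causes an
# empty merge changeset to be inserted on MYBRANCH below, while
# b'{{mergefrombranch MYBRANCH}}' adds the head of MYBRANCH as a second
# parent of the current changeset.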
820
820
821 versions = {} # changeset index where we saw any particular file version
821 versions = {} # changeset index where we saw any particular file version
822 branches = {} # changeset index where we saw a branch
822 branches = {} # changeset index where we saw a branch
823 n = len(changesets)
823 n = len(changesets)
824 i = 0
824 i = 0
825 while i < n:
825 while i < n:
826 c = changesets[i]
826 c = changesets[i]
827
827
828 for f in c.entries:
828 for f in c.entries:
829 versions[(f.rcs, f.revision)] = i
829 versions[(f.rcs, f.revision)] = i
830
830
831 p = None
831 p = None
832 if c.branch in branches:
832 if c.branch in branches:
833 p = branches[c.branch]
833 p = branches[c.branch]
834 else:
834 else:
835 # first changeset on a new branch
835 # first changeset on a new branch
836 # the parent is a changeset with the branch in its
836 # the parent is a changeset with the branch in its
837 # branchpoints such that it is the latest possible
837 # branchpoints such that it is the latest possible
838 # commit without any intervening, unrelated commits.
838 # commit without any intervening, unrelated commits.
839
839
840 for candidate in pycompat.xrange(i):
840 for candidate in pycompat.xrange(i):
841 if c.branch not in changesets[candidate].branchpoints:
841 if c.branch not in changesets[candidate].branchpoints:
842 if p is not None:
842 if p is not None:
843 break
843 break
844 continue
844 continue
845 p = candidate
845 p = candidate
846
846
847 c.parents = []
847 c.parents = []
848 if p is not None:
848 if p is not None:
849 p = changesets[p]
849 p = changesets[p]
850
850
851 # Ensure no changeset has a synthetic changeset as a parent.
851 # Ensure no changeset has a synthetic changeset as a parent.
852 while p.synthetic:
852 while p.synthetic:
853 assert len(p.parents) <= 1, _(
853 assert len(p.parents) <= 1, _(
854 b'synthetic changeset cannot have multiple parents'
854 b'synthetic changeset cannot have multiple parents'
855 )
855 )
856 if p.parents:
856 if p.parents:
857 p = p.parents[0]
857 p = p.parents[0]
858 else:
858 else:
859 p = None
859 p = None
860 break
860 break
861
861
862 if p is not None:
862 if p is not None:
863 c.parents.append(p)
863 c.parents.append(p)
864
864
865 if c.mergepoint:
865 if c.mergepoint:
866 if c.mergepoint == b'HEAD':
866 if c.mergepoint == b'HEAD':
867 c.mergepoint = None
867 c.mergepoint = None
868 c.parents.append(changesets[branches[c.mergepoint]])
868 c.parents.append(changesets[branches[c.mergepoint]])
869
869
870 if mergefrom:
870 if mergefrom:
871 m = mergefrom.search(c.comment)
871 m = mergefrom.search(c.comment)
872 if m:
872 if m:
873 m = m.group(1)
873 m = m.group(1)
874 if m == b'HEAD':
874 if m == b'HEAD':
875 m = None
875 m = None
876 try:
876 try:
877 candidate = changesets[branches[m]]
877 candidate = changesets[branches[m]]
878 except KeyError:
878 except KeyError:
879 ui.warn(
879 ui.warn(
880 _(
880 _(
881 b"warning: CVS commit message references "
881 b"warning: CVS commit message references "
882 b"non-existent branch %r:\n%s\n"
882 b"non-existent branch %r:\n%s\n"
883 )
883 )
884 % (pycompat.bytestr(m), c.comment)
884 % (pycompat.bytestr(m), c.comment)
885 )
885 )
886 if m in branches and c.branch != m and not candidate.synthetic:
886 if m in branches and c.branch != m and not candidate.synthetic:
887 c.parents.append(candidate)
887 c.parents.append(candidate)
888
888
889 if mergeto:
889 if mergeto:
890 m = mergeto.search(c.comment)
890 m = mergeto.search(c.comment)
891 if m:
891 if m:
892 if m.groups():
892 if m.groups():
893 m = m.group(1)
893 m = m.group(1)
894 if m == b'HEAD':
894 if m == b'HEAD':
895 m = None
895 m = None
896 else:
896 else:
897 m = None # if no group found then merge to HEAD
897 m = None # if no group found then merge to HEAD
898 if m in branches and c.branch != m:
898 if m in branches and c.branch != m:
899 # insert empty changeset for merge
899 # insert empty changeset for merge
900 cc = changeset(
900 cc = changeset(
901 author=c.author,
901 author=c.author,
902 branch=m,
902 branch=m,
903 date=c.date,
903 date=c.date,
904 comment=b'convert-repo: CVS merge from branch %s'
904 comment=b'convert-repo: CVS merge from branch %s'
905 % c.branch,
905 % c.branch,
906 entries=[],
906 entries=[],
907 tags=[],
907 tags=[],
908 parents=[changesets[branches[m]], c],
908 parents=[changesets[branches[m]], c],
909 )
909 )
910 changesets.insert(i + 1, cc)
910 changesets.insert(i + 1, cc)
911 branches[m] = i + 1
911 branches[m] = i + 1
912
912
913 # adjust our loop counters now we have inserted a new entry
913 # adjust our loop counters now we have inserted a new entry
914 n += 1
914 n += 1
915 i += 2
915 i += 2
916 continue
916 continue
917
917
918 branches[c.branch] = i
918 branches[c.branch] = i
919 i += 1
919 i += 1
920
920
921 # Drop synthetic changesets (safe now that we have ensured no other
921 # Drop synthetic changesets (safe now that we have ensured no other
922 # changesets can have them as parents).
922 # changesets can have them as parents).
923 i = 0
923 i = 0
924 while i < len(changesets):
924 while i < len(changesets):
925 if changesets[i].synthetic:
925 if changesets[i].synthetic:
926 del changesets[i]
926 del changesets[i]
927 else:
927 else:
928 i += 1
928 i += 1
929
929
930 # Number changesets
930 # Number changesets
931
931
932 for i, c in enumerate(changesets):
932 for i, c in enumerate(changesets):
933 c.id = i + 1
933 c.id = i + 1
934
934
935 if odd:
935 if odd:
936 for l, r in odd:
936 for l, r in odd:
937 if l.id is not None and r.id is not None:
937 if l.id is not None and r.id is not None:
938 ui.warn(
938 ui.warn(
939 _(b'changeset %d is both before and after %d\n')
939 _(b'changeset %d is both before and after %d\n')
940 % (l.id, r.id)
940 % (l.id, r.id)
941 )
941 )
942
942
943 ui.status(_(b'%d changeset entries\n') % len(changesets))
943 ui.status(_(b'%d changeset entries\n') % len(changesets))
944
944
945 hook.hook(ui, None, b"cvschangesets", True, changesets=changesets)
945 hook.hook(ui, None, b"cvschangesets", True, changesets=changesets)
946
946
947 return changesets
947 return changesets
948
948
949
949
950 def debugcvsps(ui, *args, **opts):
950 def debugcvsps(ui, *args, **opts):
951 '''Read CVS rlog for current directory or named path in
951 '''Read CVS rlog for current directory or named path in
952 repository, and convert the log to changesets based on matching
952 repository, and convert the log to changesets based on matching
953 commit log entries and dates.
953 commit log entries and dates.
954 '''
954 '''
955 opts = pycompat.byteskwargs(opts)
955 opts = pycompat.byteskwargs(opts)
956 if opts[b"new_cache"]:
956 if opts[b"new_cache"]:
957 cache = b"write"
957 cache = b"write"
958 elif opts[b"update_cache"]:
958 elif opts[b"update_cache"]:
959 cache = b"update"
959 cache = b"update"
960 else:
960 else:
961 cache = None
961 cache = None
962
962
963 revisions = opts[b"revisions"]
963 revisions = opts[b"revisions"]
964
964
965 try:
965 try:
966 if args:
966 if args:
967 log = []
967 log = []
968 for d in args:
968 for d in args:
969 log += createlog(ui, d, root=opts[b"root"], cache=cache)
969 log += createlog(ui, d, root=opts[b"root"], cache=cache)
970 else:
970 else:
971 log = createlog(ui, root=opts[b"root"], cache=cache)
971 log = createlog(ui, root=opts[b"root"], cache=cache)
972 except logerror as e:
972 except logerror as e:
973 ui.write(b"%r\n" % e)
973 ui.write(b"%r\n" % e)
974 return
974 return
975
975
976 changesets = createchangeset(ui, log, opts[b"fuzz"])
976 changesets = createchangeset(ui, log, opts[b"fuzz"])
977 del log
977 del log
978
978
979 # Print changesets (optionally filtered)
979 # Print changesets (optionally filtered)
980
980
981 off = len(revisions)
981 off = len(revisions)
982 branches = {} # latest version number in each branch
982 branches = {} # latest version number in each branch
983 ancestors = {} # parent branch
983 ancestors = {} # parent branch
984 for cs in changesets:
984 for cs in changesets:
985
985
986 if opts[b"ancestors"]:
986 if opts[b"ancestors"]:
987 if cs.branch not in branches and cs.parents and cs.parents[0].id:
987 if cs.branch not in branches and cs.parents and cs.parents[0].id:
988 ancestors[cs.branch] = (
988 ancestors[cs.branch] = (
989 changesets[cs.parents[0].id - 1].branch,
989 changesets[cs.parents[0].id - 1].branch,
990 cs.parents[0].id,
990 cs.parents[0].id,
991 )
991 )
992 branches[cs.branch] = cs.id
992 branches[cs.branch] = cs.id
993
993
994 # limit by branches
994 # limit by branches
995 if (
995 if (
996 opts[b"branches"]
996 opts[b"branches"]
997 and (cs.branch or b'HEAD') not in opts[b"branches"]
997 and (cs.branch or b'HEAD') not in opts[b"branches"]
998 ):
998 ):
999 continue
999 continue
1000
1000
1001 if not off:
1001 if not off:
1002 # Note: trailing spaces on several lines here are needed to have
1002 # Note: trailing spaces on several lines here are needed to have
1003 # bug-for-bug compatibility with cvsps.
1003 # bug-for-bug compatibility with cvsps.
1004 ui.write(b'---------------------\n')
1004 ui.write(b'---------------------\n')
1005 ui.write((b'PatchSet %d \n' % cs.id))
1005 ui.write((b'PatchSet %d \n' % cs.id))
1006 ui.write(
1006 ui.write(
1007 (
1007 (
1008 b'Date: %s\n'
1008 b'Date: %s\n'
1009 % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2')
1009 % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2')
1010 )
1010 )
1011 )
1011 )
1012 ui.write((b'Author: %s\n' % cs.author))
1012 ui.write((b'Author: %s\n' % cs.author))
1013 ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD')))
1013 ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD')))
1014 ui.write(
1014 ui.write(
1015 (
1015 (
1016 b'Tag%s: %s \n'
1016 b'Tag%s: %s \n'
1017 % (
1017 % (
1018 [b'', b's'][len(cs.tags) > 1],
1018 [b'', b's'][len(cs.tags) > 1],
1019 b','.join(cs.tags) or b'(none)',
1019 b','.join(cs.tags) or b'(none)',
1020 )
1020 )
1021 )
1021 )
1022 )
1022 )
1023 if cs.branchpoints:
1023 if cs.branchpoints:
1024 ui.write(
1024 ui.writenoi18n(
1025 b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints))
1025 b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints))
1026 )
1026 )
1027 if opts[b"parents"] and cs.parents:
1027 if opts[b"parents"] and cs.parents:
1028 if len(cs.parents) > 1:
1028 if len(cs.parents) > 1:
1029 ui.write(
1029 ui.write(
1030 (
1030 (
1031 b'Parents: %s\n'
1031 b'Parents: %s\n'
1032 % (b','.join([(b"%d" % p.id) for p in cs.parents]))
1032 % (b','.join([(b"%d" % p.id) for p in cs.parents]))
1033 )
1033 )
1034 )
1034 )
1035 else:
1035 else:
1036 ui.write((b'Parent: %d\n' % cs.parents[0].id))
1036 ui.write((b'Parent: %d\n' % cs.parents[0].id))
1037
1037
1038 if opts[b"ancestors"]:
1038 if opts[b"ancestors"]:
1039 b = cs.branch
1039 b = cs.branch
1040 r = []
1040 r = []
1041 while b:
1041 while b:
1042 b, c = ancestors[b]
1042 b, c = ancestors[b]
1043 r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b]))
1043 r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b]))
1044 if r:
1044 if r:
1045 ui.write((b'Ancestors: %s\n' % (b','.join(r))))
1045 ui.write((b'Ancestors: %s\n' % (b','.join(r))))
1046
1046
1047 ui.write(b'Log:\n')
1047 ui.writenoi18n(b'Log:\n')
1048 ui.write(b'%s\n\n' % cs.comment)
1048 ui.write(b'%s\n\n' % cs.comment)
1049 ui.write(b'Members: \n')
1049 ui.writenoi18n(b'Members: \n')
1050 for f in cs.entries:
1050 for f in cs.entries:
1051 fn = f.file
1051 fn = f.file
1052 if fn.startswith(opts[b"prefix"]):
1052 if fn.startswith(opts[b"prefix"]):
1053 fn = fn[len(opts[b"prefix"]) :]
1053 fn = fn[len(opts[b"prefix"]) :]
1054 ui.write(
1054 ui.write(
1055 b'\t%s:%s->%s%s \n'
1055 b'\t%s:%s->%s%s \n'
1056 % (
1056 % (
1057 fn,
1057 fn,
1058 b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL',
1058 b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL',
1059 b'.'.join([(b"%d" % x) for x in f.revision]),
1059 b'.'.join([(b"%d" % x) for x in f.revision]),
1060 [b'', b'(DEAD)'][f.dead],
1060 [b'', b'(DEAD)'][f.dead],
1061 )
1061 )
1062 )
1062 )
1063 ui.write(b'\n')
1063 ui.write(b'\n')
1064
1064
1065 # have we seen the start tag?
1065 # have we seen the start tag?
1066 if revisions and off:
1066 if revisions and off:
1067 if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags:
1067 if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags:
1068 off = False
1068 off = False
1069
1069
1070 # see if we reached the end tag
1070 # see if we reached the end tag
1071 if len(revisions) > 1 and not off:
1071 if len(revisions) > 1 and not off:
1072 if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags:
1072 if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags:
1073 break
1073 break
@@ -1,385 +1,385 b''
1 # Minimal support for git commands on an hg repository
1 # Minimal support for git commands on an hg repository
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''browse the repository in a graphical way
8 '''browse the repository in a graphical way
9
9
10 The hgk extension allows browsing the history of a repository in a
10 The hgk extension allows browsing the history of a repository in a
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 distributed with Mercurial.)
12 distributed with Mercurial.)
13
13
14 hgk consists of two parts: a Tcl script that does the displaying and
14 hgk consists of two parts: a Tcl script that does the displaying and
15 querying of information, and an extension to Mercurial named hgk.py,
15 querying of information, and an extension to Mercurial named hgk.py,
16 which provides hooks for hgk to get information. hgk can be found in
16 which provides hooks for hgk to get information. hgk can be found in
17 the contrib directory, and the extension is shipped in the hgext
17 the contrib directory, and the extension is shipped in the hgext
18 repository; it needs to be enabled.
18 repository; it needs to be enabled.
19
19
20 The :hg:`view` command will launch the hgk Tcl script. For this command
20 The :hg:`view` command will launch the hgk Tcl script. For this command
21 to work, hgk must be in your search path. Alternatively, you can specify
21 to work, hgk must be in your search path. Alternatively, you can specify
22 the path to hgk in your configuration file::
22 the path to hgk in your configuration file::
23
23
24 [hgk]
24 [hgk]
25 path = /location/of/hgk
25 path = /location/of/hgk
26
26
27 hgk can make use of the extdiff extension to visualize revisions.
27 hgk can make use of the extdiff extension to visualize revisions.
28 Assuming you have already configured the extdiff vdiff command, just add::
28 Assuming you have already configured the extdiff vdiff command, just add::
29
29
30 [hgk]
30 [hgk]
31 vdiff=vdiff
31 vdiff=vdiff
32
32
33 The revisions context menu will now display additional entries to run
33 The revisions context menu will now display additional entries to run
34 vdiff on the hovered and selected revisions.
34 vdiff on the hovered and selected revisions.
35 '''
35 '''
36
36
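# Editor's sketch, not part of this patch: the docstring above says the
# extension "needs to be enabled" but stops short of showing how. The
# usual way (assuming a standard hgrc) is an [extensions] entry alongside
# the [hgk] settings shown above:
#
#   [extensions]
#   hgk =
#
#   [hgk]
#   path = /location/of/hgk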
37 from __future__ import absolute_import
37 from __future__ import absolute_import
38
38
39 import os
39 import os
40
40
41 from mercurial.i18n import _
41 from mercurial.i18n import _
42 from mercurial.node import (
42 from mercurial.node import (
43 nullid,
43 nullid,
44 nullrev,
44 nullrev,
45 short,
45 short,
46 )
46 )
47 from mercurial import (
47 from mercurial import (
48 commands,
48 commands,
49 obsolete,
49 obsolete,
50 patch,
50 patch,
51 pycompat,
51 pycompat,
52 registrar,
52 registrar,
53 scmutil,
53 scmutil,
54 )
54 )
55
55
56 cmdtable = {}
56 cmdtable = {}
57 command = registrar.command(cmdtable)
57 command = registrar.command(cmdtable)
58 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
58 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
59 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
59 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
60 # be specifying the version(s) of Mercurial they are tested with, or
60 # be specifying the version(s) of Mercurial they are tested with, or
61 # leave the attribute unspecified.
61 # leave the attribute unspecified.
62 testedwith = b'ships-with-hg-core'
62 testedwith = b'ships-with-hg-core'
63
63
64 configtable = {}
64 configtable = {}
65 configitem = registrar.configitem(configtable)
65 configitem = registrar.configitem(configtable)
66
66
67 configitem(
67 configitem(
68 b'hgk', b'path', default=b'hgk',
68 b'hgk', b'path', default=b'hgk',
69 )
69 )
70
70
71
71
72 @command(
72 @command(
73 b'debug-diff-tree',
73 b'debug-diff-tree',
74 [
74 [
75 (b'p', b'patch', None, _(b'generate patch')),
75 (b'p', b'patch', None, _(b'generate patch')),
76 (b'r', b'recursive', None, _(b'recursive')),
76 (b'r', b'recursive', None, _(b'recursive')),
77 (b'P', b'pretty', None, _(b'pretty')),
77 (b'P', b'pretty', None, _(b'pretty')),
78 (b's', b'stdin', None, _(b'stdin')),
78 (b's', b'stdin', None, _(b'stdin')),
79 (b'C', b'copy', None, _(b'detect copies')),
79 (b'C', b'copy', None, _(b'detect copies')),
80 (b'S', b'search', b"", _(b'search')),
80 (b'S', b'search', b"", _(b'search')),
81 ],
81 ],
82 b'[OPTION]... NODE1 NODE2 [FILE]...',
82 b'[OPTION]... NODE1 NODE2 [FILE]...',
83 inferrepo=True,
83 inferrepo=True,
84 )
84 )
85 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
85 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
86 """diff trees from two commits"""
86 """diff trees from two commits"""
87
87
88 def __difftree(repo, node1, node2, files=None):
88 def __difftree(repo, node1, node2, files=None):
89 assert node2 is not None
89 assert node2 is not None
90 if files is None:
90 if files is None:
91 files = []
91 files = []
92 mmap = repo[node1].manifest()
92 mmap = repo[node1].manifest()
93 mmap2 = repo[node2].manifest()
93 mmap2 = repo[node2].manifest()
94 m = scmutil.match(repo[node1], files)
94 m = scmutil.match(repo[node1], files)
95 modified, added, removed = repo.status(node1, node2, m)[:3]
95 modified, added, removed = repo.status(node1, node2, m)[:3]
96 empty = short(nullid)
96 empty = short(nullid)
97
97
98 for f in modified:
98 for f in modified:
99 # TODO get file permissions
99 # TODO get file permissions
100 ui.write(
100 ui.writenoi18n(
101 b":100664 100664 %s %s M\t%s\t%s\n"
101 b":100664 100664 %s %s M\t%s\t%s\n"
102 % (short(mmap[f]), short(mmap2[f]), f, f)
102 % (short(mmap[f]), short(mmap2[f]), f, f)
103 )
103 )
104 for f in added:
104 for f in added:
105 ui.write(
105 ui.writenoi18n(
106 b":000000 100664 %s %s N\t%s\t%s\n"
106 b":000000 100664 %s %s N\t%s\t%s\n"
107 % (empty, short(mmap2[f]), f, f)
107 % (empty, short(mmap2[f]), f, f)
108 )
108 )
109 for f in removed:
109 for f in removed:
110 ui.write(
110 ui.writenoi18n(
111 b":100664 000000 %s %s D\t%s\t%s\n"
111 b":100664 000000 %s %s D\t%s\t%s\n"
112 % (short(mmap[f]), empty, f, f)
112 % (short(mmap[f]), empty, f, f)
113 )
113 )
114
114
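# Editor's note, not part of this patch: the three loops above emit git's
# raw diff line format,
#   :<old mode> <new mode> <old hash> <new hash> <status>\t<src>\t<dst>
# with the regular-file mode 100664 hard-coded (hence the TODO about real
# permissions), short(nullid) standing in for the missing side of an add
# or delete, and M/N/D as the status letter.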
115 ##
115 ##
116
116
117 while True:
117 while True:
118 if opts[r'stdin']:
118 if opts[r'stdin']:
119 line = ui.fin.readline()
119 line = ui.fin.readline()
120 if not line:
120 if not line:
121 break
121 break
122 line = line.rstrip(pycompat.oslinesep).split(b' ')
122 line = line.rstrip(pycompat.oslinesep).split(b' ')
123 node1 = line[0]
123 node1 = line[0]
124 if len(line) > 1:
124 if len(line) > 1:
125 node2 = line[1]
125 node2 = line[1]
126 else:
126 else:
127 node2 = None
127 node2 = None
128 node1 = repo.lookup(node1)
128 node1 = repo.lookup(node1)
129 if node2:
129 if node2:
130 node2 = repo.lookup(node2)
130 node2 = repo.lookup(node2)
131 else:
131 else:
132 node2 = node1
132 node2 = node1
133 node1 = repo.changelog.parents(node1)[0]
133 node1 = repo.changelog.parents(node1)[0]
134 if opts[r'patch']:
134 if opts[r'patch']:
135 if opts[r'pretty']:
135 if opts[r'pretty']:
136 catcommit(ui, repo, node2, b"")
136 catcommit(ui, repo, node2, b"")
137 m = scmutil.match(repo[node1], files)
137 m = scmutil.match(repo[node1], files)
138 diffopts = patch.difffeatureopts(ui)
138 diffopts = patch.difffeatureopts(ui)
139 diffopts.git = True
139 diffopts.git = True
140 chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts)
140 chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts)
141 for chunk in chunks:
141 for chunk in chunks:
142 ui.write(chunk)
142 ui.write(chunk)
143 else:
143 else:
144 __difftree(repo, node1, node2, files=files)
144 __difftree(repo, node1, node2, files=files)
145 if not opts[r'stdin']:
145 if not opts[r'stdin']:
146 break
146 break
147
147
148
148
149 def catcommit(ui, repo, n, prefix, ctx=None):
149 def catcommit(ui, repo, n, prefix, ctx=None):
150 nlprefix = b'\n' + prefix
150 nlprefix = b'\n' + prefix
151 if ctx is None:
151 if ctx is None:
152 ctx = repo[n]
152 ctx = repo[n]
153 # TODO: use ctx.node() instead?
153 # TODO: use ctx.node() instead?
154 ui.write((b"tree %s\n" % short(ctx.changeset()[0])))
154 ui.write((b"tree %s\n" % short(ctx.changeset()[0])))
155 for p in ctx.parents():
155 for p in ctx.parents():
156 ui.write((b"parent %s\n" % p))
156 ui.write((b"parent %s\n" % p))
157
157
158 date = ctx.date()
158 date = ctx.date()
159 description = ctx.description().replace(b"\0", b"")
159 description = ctx.description().replace(b"\0", b"")
160 ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
160 ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
161
161
162 if b'committer' in ctx.extra():
162 if b'committer' in ctx.extra():
163 ui.write((b"committer %s\n" % ctx.extra()[b'committer']))
163 ui.write((b"committer %s\n" % ctx.extra()[b'committer']))
164
164
165 ui.write((b"revision %d\n" % ctx.rev()))
165 ui.write((b"revision %d\n" % ctx.rev()))
166 ui.write((b"branch %s\n" % ctx.branch()))
166 ui.write((b"branch %s\n" % ctx.branch()))
167 if obsolete.isenabled(repo, obsolete.createmarkersopt):
167 if obsolete.isenabled(repo, obsolete.createmarkersopt):
168 if ctx.obsolete():
168 if ctx.obsolete():
169 ui.write(b"obsolete\n")
169 ui.writenoi18n(b"obsolete\n")
170 ui.write((b"phase %s\n\n" % ctx.phasestr()))
170 ui.write((b"phase %s\n\n" % ctx.phasestr()))
171
171
172 if prefix != b"":
172 if prefix != b"":
173 ui.write(
173 ui.write(
174 b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip())
174 b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip())
175 )
175 )
176 else:
176 else:
177 ui.write(description + b"\n")
177 ui.write(description + b"\n")
178 if prefix:
178 if prefix:
179 ui.write(b'\0')
179 ui.write(b'\0')
180
180
181
181
182 @command(b'debug-merge-base', [], _(b'REV REV'))
182 @command(b'debug-merge-base', [], _(b'REV REV'))
183 def base(ui, repo, node1, node2):
183 def base(ui, repo, node1, node2):
184 """output common ancestor information"""
184 """output common ancestor information"""
185 node1 = repo.lookup(node1)
185 node1 = repo.lookup(node1)
186 node2 = repo.lookup(node2)
186 node2 = repo.lookup(node2)
187 n = repo.changelog.ancestor(node1, node2)
187 n = repo.changelog.ancestor(node1, node2)
188 ui.write(short(n) + b"\n")
188 ui.write(short(n) + b"\n")
189
189
190
190
191 @command(
191 @command(
192 b'debug-cat-file',
192 b'debug-cat-file',
193 [(b's', b'stdin', None, _(b'stdin'))],
193 [(b's', b'stdin', None, _(b'stdin'))],
194 _(b'[OPTION]... TYPE FILE'),
194 _(b'[OPTION]... TYPE FILE'),
195 inferrepo=True,
195 inferrepo=True,
196 )
196 )
197 def catfile(ui, repo, type=None, r=None, **opts):
197 def catfile(ui, repo, type=None, r=None, **opts):
198 """cat a specific revision"""
198 """cat a specific revision"""
199 # in stdin mode, every line except the commit is prefixed with two
199 # in stdin mode, every line except the commit is prefixed with two
200 # spaces. This way our caller can find the commit without magic
200 # spaces. This way our caller can find the commit without magic
201 # strings
201 # strings
202 #
202 #
203 prefix = b""
203 prefix = b""
204 if opts[r'stdin']:
204 if opts[r'stdin']:
205 line = ui.fin.readline()
205 line = ui.fin.readline()
206 if not line:
206 if not line:
207 return
207 return
208 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
208 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
209 prefix = b" "
209 prefix = b" "
210 else:
210 else:
211 if not type or not r:
211 if not type or not r:
212 ui.warn(_(b"cat-file: type or revision not supplied\n"))
212 ui.warn(_(b"cat-file: type or revision not supplied\n"))
213 commands.help_(ui, b'cat-file')
213 commands.help_(ui, b'cat-file')
214
214
215 while r:
215 while r:
216 if type != b"commit":
216 if type != b"commit":
217 ui.warn(_(b"aborting: hg cat-file only understands commits\n"))
217 ui.warn(_(b"aborting: hg cat-file only understands commits\n"))
218 return 1
218 return 1
219 n = repo.lookup(r)
219 n = repo.lookup(r)
220 catcommit(ui, repo, n, prefix)
220 catcommit(ui, repo, n, prefix)
221 if opts[r'stdin']:
221 if opts[r'stdin']:
222 line = ui.fin.readline()
222 line = ui.fin.readline()
223 if not line:
223 if not line:
224 break
224 break
225 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
225 (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
226 else:
226 else:
227 break
227 break
228
228
229
229
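# Editor's sketch, not part of this patch (hypothetical helper name): a
# consumer of debug-cat-file's stdin mode can rely on the framing that the
# comment above describes -- body lines carry a two-space prefix, so a
# line starts a new commit record exactly when it begins in column zero.
def _iscommitheader(line):
    return not line.startswith(b'  ')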
230 # git rev-tree is a confusing thing. You can supply a number of
230 # git rev-tree is a confusing thing. You can supply a number of
231 # commit sha1s on the command line, and it walks the commit history
231 # commit sha1s on the command line, and it walks the commit history
232 # telling you which commits are reachable from the supplied ones via
232 # telling you which commits are reachable from the supplied ones via
233 # a bitmask based on arg position.
233 # a bitmask based on arg position.
234 # you can specify a commit to stop at by starting the sha1 with ^
234 # you can specify a commit to stop at by starting the sha1 with ^
235 def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
235 def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
236 def chlogwalk():
236 def chlogwalk():
237 count = len(repo)
237 count = len(repo)
238 i = count
238 i = count
239 l = [0] * 100
239 l = [0] * 100
240 chunk = 100
240 chunk = 100
241 while True:
241 while True:
242 if chunk > i:
242 if chunk > i:
243 chunk = i
243 chunk = i
244 i = 0
244 i = 0
245 else:
245 else:
246 i -= chunk
246 i -= chunk
247
247
248 for x in pycompat.xrange(chunk):
248 for x in pycompat.xrange(chunk):
249 if i + x >= count:
249 if i + x >= count:
250 l[chunk - x :] = [0] * (chunk - x)
250 l[chunk - x :] = [0] * (chunk - x)
251 break
251 break
252 if full is not None:
252 if full is not None:
253 if (i + x) in repo:
253 if (i + x) in repo:
254 l[x] = repo[i + x]
254 l[x] = repo[i + x]
255 l[x].changeset() # force reading
255 l[x].changeset() # force reading
256 else:
256 else:
257 if (i + x) in repo:
257 if (i + x) in repo:
258 l[x] = 1
258 l[x] = 1
259 for x in pycompat.xrange(chunk - 1, -1, -1):
259 for x in pycompat.xrange(chunk - 1, -1, -1):
260 if l[x] != 0:
260 if l[x] != 0:
261 yield (i + x, full is not None and l[x] or None)
261 yield (i + x, full is not None and l[x] or None)
262 if i == 0:
262 if i == 0:
263 break
263 break
264
264
265 # calculate and return the reachability bitmask for sha
265 # calculate and return the reachability bitmask for sha
266 def is_reachable(ar, reachable, sha):
266 def is_reachable(ar, reachable, sha):
267 if len(ar) == 0:
267 if len(ar) == 0:
268 return 1
268 return 1
269 mask = 0
269 mask = 0
270 for i in pycompat.xrange(len(ar)):
270 for i in pycompat.xrange(len(ar)):
271 if sha in reachable[i]:
271 if sha in reachable[i]:
272 mask |= 1 << i
272 mask |= 1 << i
273
273
274 return mask
274 return mask
275
275
276 reachable = []
276 reachable = []
277 stop_sha1 = []
277 stop_sha1 = []
278 want_sha1 = []
278 want_sha1 = []
279 count = 0
279 count = 0
280
280
281 # figure out which commits they are asking for and which ones they
281 # figure out which commits they are asking for and which ones they
282 # want us to stop on
282 # want us to stop on
283 for i, arg in enumerate(args):
283 for i, arg in enumerate(args):
284 if arg.startswith(b'^'):
284 if arg.startswith(b'^'):
285 s = repo.lookup(arg[1:])
285 s = repo.lookup(arg[1:])
286 stop_sha1.append(s)
286 stop_sha1.append(s)
287 want_sha1.append(s)
287 want_sha1.append(s)
288 elif arg != b'HEAD':
288 elif arg != b'HEAD':
289 want_sha1.append(repo.lookup(arg))
289 want_sha1.append(repo.lookup(arg))
290
290
291 # calculate the graph for the supplied commits
291 # calculate the graph for the supplied commits
292 for i, n in enumerate(want_sha1):
292 for i, n in enumerate(want_sha1):
293 reachable.append(set())
293 reachable.append(set())
294 visit = [n]
294 visit = [n]
295 reachable[i].add(n)
295 reachable[i].add(n)
296 while visit:
296 while visit:
297 n = visit.pop(0)
297 n = visit.pop(0)
298 if n in stop_sha1:
298 if n in stop_sha1:
299 continue
299 continue
300 for p in repo.changelog.parents(n):
300 for p in repo.changelog.parents(n):
301 if p not in reachable[i]:
301 if p not in reachable[i]:
302 reachable[i].add(p)
302 reachable[i].add(p)
303 visit.append(p)
303 visit.append(p)
304 if p in stop_sha1:
304 if p in stop_sha1:
305 continue
305 continue
306
306
307 # walk the repository looking for commits that are in our
307 # walk the repository looking for commits that are in our
308 # reachability graph
308 # reachability graph
309 for i, ctx in chlogwalk():
309 for i, ctx in chlogwalk():
310 if i not in repo:
310 if i not in repo:
311 continue
311 continue
312 n = repo.changelog.node(i)
312 n = repo.changelog.node(i)
313 mask = is_reachable(want_sha1, reachable, n)
313 mask = is_reachable(want_sha1, reachable, n)
314 if mask:
314 if mask:
315 parentstr = b""
315 parentstr = b""
316 if parents:
316 if parents:
317 pp = repo.changelog.parents(n)
317 pp = repo.changelog.parents(n)
318 if pp[0] != nullid:
318 if pp[0] != nullid:
319 parentstr += b" " + short(pp[0])
319 parentstr += b" " + short(pp[0])
320 if pp[1] != nullid:
320 if pp[1] != nullid:
321 parentstr += b" " + short(pp[1])
321 parentstr += b" " + short(pp[1])
322 if not full:
322 if not full:
323 ui.write(b"%s%s\n" % (short(n), parentstr))
323 ui.write(b"%s%s\n" % (short(n), parentstr))
324 elif full == b"commit":
324 elif full == b"commit":
325 ui.write(b"%s%s\n" % (short(n), parentstr))
325 ui.write(b"%s%s\n" % (short(n), parentstr))
326 catcommit(ui, repo, n, b' ', ctx)
326 catcommit(ui, repo, n, b' ', ctx)
327 else:
327 else:
328 (p1, p2) = repo.changelog.parents(n)
328 (p1, p2) = repo.changelog.parents(n)
329 (h, h1, h2) = map(short, (n, p1, p2))
329 (h, h1, h2) = map(short, (n, p1, p2))
330 (i1, i2) = map(repo.changelog.rev, (p1, p2))
330 (i1, i2) = map(repo.changelog.rev, (p1, p2))
331
331
332 date = ctx.date()[0]
332 date = ctx.date()[0]
333 ui.write(b"%s %s:%s" % (date, h, mask))
333 ui.write(b"%s %s:%s" % (date, h, mask))
334 mask = is_reachable(want_sha1, reachable, p1)
334 mask = is_reachable(want_sha1, reachable, p1)
335 if i1 != nullrev and mask > 0:
335 if i1 != nullrev and mask > 0:
336 ui.write(b"%s:%s " % (h1, mask)),
336 ui.write(b"%s:%s " % (h1, mask)),
337 mask = is_reachable(want_sha1, reachable, p2)
337 mask = is_reachable(want_sha1, reachable, p2)
338 if i2 != nullrev and mask > 0:
338 if i2 != nullrev and mask > 0:
339 ui.write(b"%s:%s " % (h2, mask))
339 ui.write(b"%s:%s " % (h2, mask))
340 ui.write(b"\n")
340 ui.write(b"\n")
341 if maxnr and count >= maxnr:
341 if maxnr and count >= maxnr:
342 break
342 break
343 count += 1
343 count += 1
344
344
345
345
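# Editor's sketch, not part of this patch: the reachability bitmask that
# the rev-tree comment above describes, reduced to plain sets. Bit i is
# set when the node is reachable from the i-th command-line argument, so
# with two arguments a mask of 3 means "reachable from both".
def _bitmask(reachable, sha):
    mask = 0
    for i, ancestors in enumerate(reachable):
        if sha in ancestors:
            mask |= 1 << i
    return mask

# _bitmask([{b'a', b'b'}, {b'b', b'c'}], b'b') == 3
# _bitmask([{b'a', b'b'}, {b'b', b'c'}], b'c') == 2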
346 # git rev-list tries to order things by date, and has the ability to stop
346 # git rev-list tries to order things by date, and has the ability to stop
347 # at a given commit without walking the whole repo. TODO add the stop
347 # at a given commit without walking the whole repo. TODO add the stop
348 # parameter
348 # parameter
349 @command(
349 @command(
350 b'debug-rev-list',
350 b'debug-rev-list',
351 [
351 [
352 (b'H', b'header', None, _(b'header')),
352 (b'H', b'header', None, _(b'header')),
353 (b't', b'topo-order', None, _(b'topo-order')),
353 (b't', b'topo-order', None, _(b'topo-order')),
354 (b'p', b'parents', None, _(b'parents')),
354 (b'p', b'parents', None, _(b'parents')),
355 (b'n', b'max-count', 0, _(b'max-count')),
355 (b'n', b'max-count', 0, _(b'max-count')),
356 ],
356 ],
357 b'[OPTION]... REV...',
357 b'[OPTION]... REV...',
358 )
358 )
359 def revlist(ui, repo, *revs, **opts):
359 def revlist(ui, repo, *revs, **opts):
360 """print revisions"""
360 """print revisions"""
361 if opts[b'header']:
361 if opts[b'header']:
362 full = b"commit"
362 full = b"commit"
363 else:
363 else:
364 full = None
364 full = None
365 copy = [x for x in revs]
365 copy = [x for x in revs]
366 revtree(ui, copy, repo, full, opts[r'max_count'], opts[r'parents'])
366 revtree(ui, copy, repo, full, opts[r'max_count'], opts[r'parents'])
367
367
368
368
369 @command(
369 @command(
370 b'view',
370 b'view',
371 [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
371 [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
372 _(b'[-l LIMIT] [REVRANGE]'),
372 _(b'[-l LIMIT] [REVRANGE]'),
373 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
373 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
374 )
374 )
375 def view(ui, repo, *etc, **opts):
375 def view(ui, repo, *etc, **opts):
376 b"start interactive history viewer"
376 b"start interactive history viewer"
377 opts = pycompat.byteskwargs(opts)
377 opts = pycompat.byteskwargs(opts)
378 os.chdir(repo.root)
378 os.chdir(repo.root)
379 optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.iteritems() if v])
379 optstr = b' '.join([b'--%s %s' % (k, v) for k, v in opts.iteritems() if v])
380 if repo.filtername is None:
380 if repo.filtername is None:
381 optstr += b' --hidden'  # leading space: optstr may already hold options
381 optstr += b' --hidden'  # leading space: optstr may already hold options
382
382
383 cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
383 cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
384 ui.debug(b"running %s\n" % cmd)
384 ui.debug(b"running %s\n" % cmd)
385 ui.system(cmd, blockedtag=b'hgk_view')
385 ui.system(cmd, blockedtag=b'hgk_view')
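# Editor's note, not part of this patch (illustrative values): running
# `hg view -l 5` on an unfiltered repository makes the join above yield
# optstr = b'--limit 5 --hidden', so cmd is roughly
# b'/location/of/hgk --limit 5 --hidden <REVRANGE...>'.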
@@ -1,886 +1,886 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a Distributed SCM
10 # Keyword expansion hack against the grain of a Distributed SCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56
56
57 The more specific your filename patterns are, the less speed you
57 The more specific your filename patterns are, the less speed you
58 lose in huge repositories.
58 lose in huge repositories.
59
59
60 For [keywordmaps] template mapping and expansion demonstration and
60 For [keywordmaps] template mapping and expansion demonstration and
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 available templates and filters.
62 available templates and filters.
63
63
64 Three additional date template filters are provided:
64 Three additional date template filters are provided:
65
65
66 :``utcdate``: "2006/09/18 15:13:13"
66 :``utcdate``: "2006/09/18 15:13:13"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
69
69
70 The default template mappings (view with :hg:`kwdemo -d`) can be
70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 replaced with customized keywords and templates. Again, run
71 replaced with customized keywords and templates. Again, run
72 :hg:`kwdemo` to control the results of your configuration changes.
72 :hg:`kwdemo` to control the results of your configuration changes.
73
73
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 to avoid storing expanded keywords in the change history.
75 to avoid storing expanded keywords in the change history.
76
76
77 To force expansion after enabling it, or a configuration change, run
77 To force expansion after enabling it, or a configuration change, run
78 :hg:`kwexpand`.
78 :hg:`kwexpand`.
79
79
80 Expansions spanning more than one line and incremental expansions,
80 Expansions spanning more than one line and incremental expansions,
81 like CVS' $Log$, are not supported. A keyword template map "Log =
81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 {desc}" expands to the first line of the changeset description.
82 {desc}" expands to the first line of the changeset description.
83 '''
83 '''
84
84
85
85
86 from __future__ import absolute_import
86 from __future__ import absolute_import
87
87
88 import os
88 import os
89 import re
89 import re
90 import weakref
90 import weakref
91
91
92 from mercurial.i18n import _
92 from mercurial.i18n import _
93 from mercurial.hgweb import webcommands
93 from mercurial.hgweb import webcommands
94
94
95 from mercurial import (
95 from mercurial import (
96 cmdutil,
96 cmdutil,
97 context,
97 context,
98 dispatch,
98 dispatch,
99 error,
99 error,
100 extensions,
100 extensions,
101 filelog,
101 filelog,
102 localrepo,
102 localrepo,
103 logcmdutil,
103 logcmdutil,
104 match,
104 match,
105 patch,
105 patch,
106 pathutil,
106 pathutil,
107 pycompat,
107 pycompat,
108 registrar,
108 registrar,
109 scmutil,
109 scmutil,
110 templatefilters,
110 templatefilters,
111 templateutil,
111 templateutil,
112 util,
112 util,
113 )
113 )
114 from mercurial.utils import (
114 from mercurial.utils import (
115 dateutil,
115 dateutil,
116 stringutil,
116 stringutil,
117 )
117 )
118
118
119 cmdtable = {}
119 cmdtable = {}
120 command = registrar.command(cmdtable)
120 command = registrar.command(cmdtable)
121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 # be specifying the version(s) of Mercurial they are tested with, or
123 # be specifying the version(s) of Mercurial they are tested with, or
124 # leave the attribute unspecified.
124 # leave the attribute unspecified.
125 testedwith = b'ships-with-hg-core'
125 testedwith = b'ships-with-hg-core'
126
126
127 # hg commands that do not act on keywords
127 # hg commands that do not act on keywords
128 nokwcommands = (
128 nokwcommands = (
129 b'add addremove annotate bundle export grep incoming init log'
129 b'add addremove annotate bundle export grep incoming init log'
130 b' outgoing push tip verify convert email glog'
130 b' outgoing push tip verify convert email glog'
131 )
131 )
132
132
133 # webcommands that do not act on keywords
133 # webcommands that do not act on keywords
134 nokwwebcommands = b'annotate changeset rev filediff diff comparison'
134 nokwwebcommands = b'annotate changeset rev filediff diff comparison'
135
135
136 # hg commands that trigger expansion only when writing to working dir,
136 # hg commands that trigger expansion only when writing to working dir,
137 # not when reading filelog, and unexpand when reading from working dir
137 # not when reading filelog, and unexpand when reading from working dir
138 restricted = (
138 restricted = (
139 b'merge kwexpand kwshrink record qrecord resolve transplant'
139 b'merge kwexpand kwshrink record qrecord resolve transplant'
140 b' unshelve rebase graft backout histedit fetch'
140 b' unshelve rebase graft backout histedit fetch'
141 )
141 )
142
142
143 # names of extensions using dorecord
143 # names of extensions using dorecord
144 recordextensions = b'record'
144 recordextensions = b'record'
145
145
146 colortable = {
146 colortable = {
147 b'kwfiles.enabled': b'green bold',
147 b'kwfiles.enabled': b'green bold',
148 b'kwfiles.deleted': b'cyan bold underline',
148 b'kwfiles.deleted': b'cyan bold underline',
149 b'kwfiles.enabledunknown': b'green',
149 b'kwfiles.enabledunknown': b'green',
150 b'kwfiles.ignored': b'bold',
150 b'kwfiles.ignored': b'bold',
151 b'kwfiles.ignoredunknown': b'none',
151 b'kwfiles.ignoredunknown': b'none',
152 }
152 }
153
153
154 templatefilter = registrar.templatefilter()
154 templatefilter = registrar.templatefilter()
155
155
156 configtable = {}
156 configtable = {}
157 configitem = registrar.configitem(configtable)
157 configitem = registrar.configitem(configtable)
158
158
159 configitem(
159 configitem(
160 b'keywordset', b'svn', default=False,
160 b'keywordset', b'svn', default=False,
161 )
161 )
162 # date like in cvs' $Date
162 # date like in cvs' $Date
163 @templatefilter(b'utcdate', intype=templateutil.date)
163 @templatefilter(b'utcdate', intype=templateutil.date)
164 def utcdate(date):
164 def utcdate(date):
165 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
165 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
166 '''
166 '''
167 dateformat = b'%Y/%m/%d %H:%M:%S'
167 dateformat = b'%Y/%m/%d %H:%M:%S'
168 return dateutil.datestr((date[0], 0), dateformat)
168 return dateutil.datestr((date[0], 0), dateformat)
169
169
170
170
171 # date like in svn's $Date
171 # date like in svn's $Date
172 @templatefilter(b'svnisodate', intype=templateutil.date)
172 @templatefilter(b'svnisodate', intype=templateutil.date)
173 def svnisodate(date):
173 def svnisodate(date):
174 '''Date. Returns a date in this format: "2009-08-18 13:00:13
174 '''Date. Returns a date in this format: "2009-08-18 13:00:13
175 +0200 (Tue, 18 Aug 2009)".
175 +0200 (Tue, 18 Aug 2009)".
176 '''
176 '''
177 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
177 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
178
178
179
179
180 # date like in svn's $Id
180 # date like in svn's $Id
181 @templatefilter(b'svnutcdate', intype=templateutil.date)
181 @templatefilter(b'svnutcdate', intype=templateutil.date)
182 def svnutcdate(date):
182 def svnutcdate(date):
183 '''Date. Returns a UTC-date in this format: "2009-08-18
183 '''Date. Returns a UTC-date in this format: "2009-08-18
184 11:00:13Z".
184 11:00:13Z".
185 '''
185 '''
186 dateformat = b'%Y-%m-%d %H:%M:%SZ'
186 dateformat = b'%Y-%m-%d %H:%M:%SZ'
187 return dateutil.datestr((date[0], 0), dateformat)
187 return dateutil.datestr((date[0], 0), dateformat)
188
188
189
189
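# Editor's note, not part of this patch: all three filters above consume a
# Mercurial date tuple (unixtime, tzoffset), tzoffset being seconds west of
# UTC. For the instant used in the module docstring -- 15:13:13 UTC on
# Mon 2006-09-18, local zone -0700, i.e. tzoffset 25200:
#   utcdate    -> "2006/09/18 15:13:13"            (offset forced to 0)
#   svnisodate -> "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
#   svnutcdate -> "2006-09-18 15:13:13Z"           (offset forced to 0)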
190 # make keyword tools accessible
190 # make keyword tools accessible
191 kwtools = {b'hgcmd': b''}
191 kwtools = {b'hgcmd': b''}
192
192
193
193
194 def _defaultkwmaps(ui):
194 def _defaultkwmaps(ui):
195 '''Returns default keywordmaps according to keywordset configuration.'''
195 '''Returns default keywordmaps according to keywordset configuration.'''
196 templates = {
196 templates = {
197 b'Revision': b'{node|short}',
197 b'Revision': b'{node|short}',
198 b'Author': b'{author|user}',
198 b'Author': b'{author|user}',
199 }
199 }
200 kwsets = (
200 kwsets = (
201 {
201 {
202 b'Date': b'{date|utcdate}',
202 b'Date': b'{date|utcdate}',
203 b'RCSfile': b'{file|basename},v',
203 b'RCSfile': b'{file|basename},v',
204 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
204 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
205 # with hg-keyword
205 # with hg-keyword
206 b'Source': b'{root}/{file},v',
206 b'Source': b'{root}/{file},v',
207 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
207 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
208 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
208 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
209 },
209 },
210 {
210 {
211 b'Date': b'{date|svnisodate}',
211 b'Date': b'{date|svnisodate}',
212 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
212 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
213 b'LastChangedRevision': b'{node|short}',
213 b'LastChangedRevision': b'{node|short}',
214 b'LastChangedBy': b'{author|user}',
214 b'LastChangedBy': b'{author|user}',
215 b'LastChangedDate': b'{date|svnisodate}',
215 b'LastChangedDate': b'{date|svnisodate}',
216 },
216 },
217 )
217 )
218 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
218 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
219 return templates
219 return templates
220
220
221
221
222 def _shrinktext(text, subfunc):
222 def _shrinktext(text, subfunc):
223 '''Helper for keyword expansion removal in text.
223 '''Helper for keyword expansion removal in text.
224 Depending on subfunc also returns number of substitutions.'''
224 Depending on subfunc also returns number of substitutions.'''
225 return subfunc(br'$\1$', text)
225 return subfunc(br'$\1$', text)
226
226
227
227
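# Editor's sketch, not part of this patch: passing a compiled pattern's
# .sub gives back just the shrunk text, while .subn also counts the
# substitutions; both call styles are used below. For example:
#   kw = re.compile(br'\$(Id|Revision): [^$\n\r]*? \$')
#   _shrinktext(b'$Id: f,v 1a2b3c $', kw.sub)  == b'$Id$'
#   _shrinktext(b'$Id: f,v 1a2b3c $', kw.subn) == (b'$Id$', 1)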
228 def _preselect(wstatus, changed):
228 def _preselect(wstatus, changed):
229 '''Retrieves modified and added files from a working directory state
229 '''Retrieves modified and added files from a working directory state
230 and returns the subset of each contained in given changed files
230 and returns the subset of each contained in given changed files
231 retrieved from a change context.'''
231 retrieved from a change context.'''
232 modified = [f for f in wstatus.modified if f in changed]
232 modified = [f for f in wstatus.modified if f in changed]
233 added = [f for f in wstatus.added if f in changed]
233 added = [f for f in wstatus.added if f in changed]
234 return modified, added
234 return modified, added
235
235
236
236
237 class kwtemplater(object):
237 class kwtemplater(object):
238 '''
238 '''
239 Sets up keyword templates, corresponding keyword regex, and
239 Sets up keyword templates, corresponding keyword regex, and
240 provides keyword substitution functions.
240 provides keyword substitution functions.
241 '''
241 '''
242
242
243 def __init__(self, ui, repo, inc, exc):
243 def __init__(self, ui, repo, inc, exc):
244 self.ui = ui
244 self.ui = ui
245 self._repo = weakref.ref(repo)
245 self._repo = weakref.ref(repo)
246 self.match = match.match(repo.root, b'', [], inc, exc)
246 self.match = match.match(repo.root, b'', [], inc, exc)
247 self.restrict = kwtools[b'hgcmd'] in restricted.split()
247 self.restrict = kwtools[b'hgcmd'] in restricted.split()
248 self.postcommit = False
248 self.postcommit = False
249
249
250 kwmaps = self.ui.configitems(b'keywordmaps')
250 kwmaps = self.ui.configitems(b'keywordmaps')
251 if kwmaps: # override default templates
251 if kwmaps: # override default templates
252 self.templates = dict(kwmaps)
252 self.templates = dict(kwmaps)
253 else:
253 else:
254 self.templates = _defaultkwmaps(self.ui)
254 self.templates = _defaultkwmaps(self.ui)
255
255
256 @property
256 @property
257 def repo(self):
257 def repo(self):
258 return self._repo()
258 return self._repo()
259
259
260 @util.propertycache
260 @util.propertycache
261 def escape(self):
261 def escape(self):
262 '''Returns bar-separated and escaped keywords.'''
262 '''Returns bar-separated and escaped keywords.'''
263 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
263 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
264
264
265 @util.propertycache
265 @util.propertycache
266 def rekw(self):
266 def rekw(self):
267 '''Returns regex for unexpanded keywords.'''
267 '''Returns regex for unexpanded keywords.'''
268 return re.compile(br'\$(%s)\$' % self.escape)
268 return re.compile(br'\$(%s)\$' % self.escape)
269
269
270 @util.propertycache
270 @util.propertycache
271 def rekwexp(self):
271 def rekwexp(self):
272 '''Returns regex for expanded keywords.'''
272 '''Returns regex for expanded keywords.'''
273 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
273 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
274
274
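# Editor's note, not part of this patch: with templates for e.g. Id and
# Revision, escape is b'Id|Revision', so
#   rekw    matches b'$Id$'                  (unexpanded, to be expanded)
#   rekwexp matches b'$Id: file,v 1a2b3c $'  (expanded, to be shrunk)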
275 def substitute(self, data, path, ctx, subfunc):
275 def substitute(self, data, path, ctx, subfunc):
276 '''Replaces keywords in data with expanded template.'''
276 '''Replaces keywords in data with expanded template.'''
277
277
278 def kwsub(mobj):
278 def kwsub(mobj):
279 kw = mobj.group(1)
279 kw = mobj.group(1)
280 ct = logcmdutil.maketemplater(
280 ct = logcmdutil.maketemplater(
281 self.ui, self.repo, self.templates[kw]
281 self.ui, self.repo, self.templates[kw]
282 )
282 )
283 self.ui.pushbuffer()
283 self.ui.pushbuffer()
284 ct.show(ctx, root=self.repo.root, file=path)
284 ct.show(ctx, root=self.repo.root, file=path)
285 ekw = templatefilters.firstline(self.ui.popbuffer())
285 ekw = templatefilters.firstline(self.ui.popbuffer())
286 return b'$%s: %s $' % (kw, ekw)
286 return b'$%s: %s $' % (kw, ekw)
287
287
288 return subfunc(kwsub, data)
288 return subfunc(kwsub, data)
289
289
290 def linkctx(self, path, fileid):
290 def linkctx(self, path, fileid):
291 '''Similar to filelog.linkrev, but returns a changectx.'''
291 '''Similar to filelog.linkrev, but returns a changectx.'''
292 return self.repo.filectx(path, fileid=fileid).changectx()
292 return self.repo.filectx(path, fileid=fileid).changectx()
293
293
294 def expand(self, path, node, data):
294 def expand(self, path, node, data):
295 '''Returns data with keywords expanded.'''
295 '''Returns data with keywords expanded.'''
296 if (
296 if (
297 not self.restrict
297 not self.restrict
298 and self.match(path)
298 and self.match(path)
299 and not stringutil.binary(data)
299 and not stringutil.binary(data)
300 ):
300 ):
301 ctx = self.linkctx(path, node)
301 ctx = self.linkctx(path, node)
302 return self.substitute(data, path, ctx, self.rekw.sub)
302 return self.substitute(data, path, ctx, self.rekw.sub)
303 return data
303 return data
304
304
305 def iskwfile(self, cand, ctx):
305 def iskwfile(self, cand, ctx):
306 '''Returns subset of candidates which are configured for keyword
306 '''Returns subset of candidates which are configured for keyword
307 expansion but are not symbolic links.'''
307 expansion but are not symbolic links.'''
308 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
308 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
309
309
310 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
310 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
311 '''Overwrites selected files expanding/shrinking keywords.'''
311 '''Overwrites selected files expanding/shrinking keywords.'''
312 if self.restrict or lookup or self.postcommit: # exclude kw_copy
312 if self.restrict or lookup or self.postcommit: # exclude kw_copy
313 candidates = self.iskwfile(candidates, ctx)
313 candidates = self.iskwfile(candidates, ctx)
314 if not candidates:
314 if not candidates:
315 return
315 return
316 kwcmd = self.restrict and lookup # kwexpand/kwshrink
316 kwcmd = self.restrict and lookup # kwexpand/kwshrink
317 if self.restrict or expand and lookup:
317 if self.restrict or expand and lookup:
318 mf = ctx.manifest()
318 mf = ctx.manifest()
319 if self.restrict or rekw:
319 if self.restrict or rekw:
320 re_kw = self.rekw
320 re_kw = self.rekw
321 else:
321 else:
322 re_kw = self.rekwexp
322 re_kw = self.rekwexp
323 if expand:
323 if expand:
324 msg = _(b'overwriting %s expanding keywords\n')
324 msg = _(b'overwriting %s expanding keywords\n')
325 else:
325 else:
326 msg = _(b'overwriting %s shrinking keywords\n')
326 msg = _(b'overwriting %s shrinking keywords\n')
327 for f in candidates:
327 for f in candidates:
328 if self.restrict:
328 if self.restrict:
329 data = self.repo.file(f).read(mf[f])
329 data = self.repo.file(f).read(mf[f])
330 else:
330 else:
331 data = self.repo.wread(f)
331 data = self.repo.wread(f)
332 if stringutil.binary(data):
332 if stringutil.binary(data):
333 continue
333 continue
334 if expand:
334 if expand:
335 parents = ctx.parents()
335 parents = ctx.parents()
336 if lookup:
336 if lookup:
337 ctx = self.linkctx(f, mf[f])
337 ctx = self.linkctx(f, mf[f])
338 elif self.restrict and len(parents) > 1:
338 elif self.restrict and len(parents) > 1:
339 # merge commit
339 # merge commit
340 # in case of conflict f is in modified state during
340 # in case of conflict f is in modified state during
341 # merge, even if f does not differ from f in parent
341 # merge, even if f does not differ from f in parent
342 for p in parents:
342 for p in parents:
343 if f in p and not p[f].cmp(ctx[f]):
343 if f in p and not p[f].cmp(ctx[f]):
344 ctx = p[f].changectx()
344 ctx = p[f].changectx()
345 break
345 break
346 data, found = self.substitute(data, f, ctx, re_kw.subn)
346 data, found = self.substitute(data, f, ctx, re_kw.subn)
347 elif self.restrict:
347 elif self.restrict:
348 found = re_kw.search(data)
348 found = re_kw.search(data)
349 else:
349 else:
350 data, found = _shrinktext(data, re_kw.subn)
350 data, found = _shrinktext(data, re_kw.subn)
351 if found:
351 if found:
352 self.ui.note(msg % f)
352 self.ui.note(msg % f)
353 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
353 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
354 fp.write(data)
354 fp.write(data)
355 fp.close()
355 fp.close()
356 if kwcmd:
356 if kwcmd:
357 self.repo.dirstate.normal(f)
357 self.repo.dirstate.normal(f)
358 elif self.postcommit:
358 elif self.postcommit:
359 self.repo.dirstate.normallookup(f)
359 self.repo.dirstate.normallookup(f)
360
360
361 def shrink(self, fname, text):
361 def shrink(self, fname, text):
362 '''Returns text with all keyword substitutions removed.'''
362 '''Returns text with all keyword substitutions removed.'''
363 if self.match(fname) and not stringutil.binary(text):
363 if self.match(fname) and not stringutil.binary(text):
364 return _shrinktext(text, self.rekwexp.sub)
364 return _shrinktext(text, self.rekwexp.sub)
365 return text
365 return text
366
366
367 def shrinklines(self, fname, lines):
367 def shrinklines(self, fname, lines):
368 '''Returns lines with keyword substitutions removed.'''
368 '''Returns lines with keyword substitutions removed.'''
369 if self.match(fname):
369 if self.match(fname):
370 text = b''.join(lines)
370 text = b''.join(lines)
371 if not stringutil.binary(text):
371 if not stringutil.binary(text):
372 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
372 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
373 return lines
373 return lines
374
374
375 def wread(self, fname, data):
375 def wread(self, fname, data):
376 '''If in restricted mode returns data read from wdir with
376 '''If in restricted mode returns data read from wdir with
377 keyword substitutions removed.'''
377 keyword substitutions removed.'''
378 if self.restrict:
378 if self.restrict:
379 return self.shrink(fname, data)
379 return self.shrink(fname, data)
380 return data
380 return data
381
381
382
382
383 class kwfilelog(filelog.filelog):
383 class kwfilelog(filelog.filelog):
384 '''
384 '''
385 Subclass of filelog to hook into its read, add, cmp methods.
385 Subclass of filelog to hook into its read, add, cmp methods.
386 Keywords are "stored" unexpanded, and processed on reading.
386 Keywords are "stored" unexpanded, and processed on reading.
387 '''
387 '''
388
388
389 def __init__(self, opener, kwt, path):
389 def __init__(self, opener, kwt, path):
390 super(kwfilelog, self).__init__(opener, path)
390 super(kwfilelog, self).__init__(opener, path)
391 self.kwt = kwt
391 self.kwt = kwt
392 self.path = path
392 self.path = path
393
393
394 def read(self, node):
394 def read(self, node):
395 '''Expands keywords when reading filelog.'''
395 '''Expands keywords when reading filelog.'''
396 data = super(kwfilelog, self).read(node)
396 data = super(kwfilelog, self).read(node)
397 if self.renamed(node):
397 if self.renamed(node):
398 return data
398 return data
399 return self.kwt.expand(self.path, node, data)
399 return self.kwt.expand(self.path, node, data)
400
400
401 def add(self, text, meta, tr, link, p1=None, p2=None):
401 def add(self, text, meta, tr, link, p1=None, p2=None):
402 '''Removes keyword substitutions when adding to filelog.'''
402 '''Removes keyword substitutions when adding to filelog.'''
403 text = self.kwt.shrink(self.path, text)
403 text = self.kwt.shrink(self.path, text)
404 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
404 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
405
405
406 def cmp(self, node, text):
406 def cmp(self, node, text):
407 '''Removes keyword substitutions for comparison.'''
407 '''Removes keyword substitutions for comparison.'''
408 text = self.kwt.shrink(self.path, text)
408 text = self.kwt.shrink(self.path, text)
409 return super(kwfilelog, self).cmp(node, text)
409 return super(kwfilelog, self).cmp(node, text)
410
410
411
411
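# Editor's note, not part of this patch: the add/read pair above keeps the
# store canonical -- add() shrinks b'$Id: ... $' back to b'$Id$' before the
# text is hashed, and read() re-expands it on the way out, so recorded
# history (and the nodeids) never depends on the expansion configuration.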
412 def _status(ui, repo, wctx, kwt, *pats, **opts):
412 def _status(ui, repo, wctx, kwt, *pats, **opts):
413 '''Bails out if [keyword] configuration is not active.
413 '''Bails out if [keyword] configuration is not active.
414 Returns status of working directory.'''
414 Returns status of working directory.'''
415 if kwt:
415 if kwt:
416 opts = pycompat.byteskwargs(opts)
416 opts = pycompat.byteskwargs(opts)
417 return repo.status(
417 return repo.status(
418 match=scmutil.match(wctx, pats, opts),
418 match=scmutil.match(wctx, pats, opts),
419 clean=True,
419 clean=True,
420 unknown=opts.get(b'unknown') or opts.get(b'all'),
420 unknown=opts.get(b'unknown') or opts.get(b'all'),
421 )
421 )
422 if ui.configitems(b'keyword'):
422 if ui.configitems(b'keyword'):
423 raise error.Abort(_(b'[keyword] patterns cannot match'))
423 raise error.Abort(_(b'[keyword] patterns cannot match'))
424 raise error.Abort(_(b'no [keyword] patterns configured'))
424 raise error.Abort(_(b'no [keyword] patterns configured'))
425
425
426
426
427 def _kwfwrite(ui, repo, expand, *pats, **opts):
427 def _kwfwrite(ui, repo, expand, *pats, **opts):
428 '''Selects files and passes them to kwtemplater.overwrite.'''
428 '''Selects files and passes them to kwtemplater.overwrite.'''
429 wctx = repo[None]
429 wctx = repo[None]
430 if len(wctx.parents()) > 1:
430 if len(wctx.parents()) > 1:
431 raise error.Abort(_(b'outstanding uncommitted merge'))
431 raise error.Abort(_(b'outstanding uncommitted merge'))
432 kwt = getattr(repo, '_keywordkwt', None)
432 kwt = getattr(repo, '_keywordkwt', None)
433 with repo.wlock():
433 with repo.wlock():
434 status = _status(ui, repo, wctx, kwt, *pats, **opts)
434 status = _status(ui, repo, wctx, kwt, *pats, **opts)
435 if status.modified or status.added or status.removed or status.deleted:
435 if status.modified or status.added or status.removed or status.deleted:
436 raise error.Abort(_(b'outstanding uncommitted changes'))
436 raise error.Abort(_(b'outstanding uncommitted changes'))
437 kwt.overwrite(wctx, status.clean, True, expand)
437 kwt.overwrite(wctx, status.clean, True, expand)
438
438
439
439
440 @command(
440 @command(
441 b'kwdemo',
441 b'kwdemo',
442 [
442 [
443 (b'd', b'default', None, _(b'show default keyword template maps')),
443 (b'd', b'default', None, _(b'show default keyword template maps')),
444 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
444 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
445 ],
445 ],
446 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
446 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
447 optionalrepo=True,
447 optionalrepo=True,
448 )
448 )
449 def demo(ui, repo, *args, **opts):
449 def demo(ui, repo, *args, **opts):
450 '''print [keywordmaps] configuration and an expansion example
450 '''print [keywordmaps] configuration and an expansion example
451
451
452 Show current, custom, or default keyword template maps and their
452 Show current, custom, or default keyword template maps and their
453 expansions.
453 expansions.
454
454
455 Extend the current configuration by specifying maps as arguments
455 Extend the current configuration by specifying maps as arguments
456 and using -f/--rcfile to source an external hgrc file.
456 and using -f/--rcfile to source an external hgrc file.
457
457
458 Use -d/--default to disable current configuration.
458 Use -d/--default to disable current configuration.
459
459
460 See :hg:`help templates` for information on templates and filters.
460 See :hg:`help templates` for information on templates and filters.
461 '''
461 '''
462
462
463 def demoitems(section, items):
463 def demoitems(section, items):
464 ui.write(b'[%s]\n' % section)
464 ui.write(b'[%s]\n' % section)
465 for k, v in sorted(items):
465 for k, v in sorted(items):
466 if isinstance(v, bool):
466 if isinstance(v, bool):
467 v = stringutil.pprint(v)
467 v = stringutil.pprint(v)
468 ui.write(b'%s = %s\n' % (k, v))
468 ui.write(b'%s = %s\n' % (k, v))
469
469
470 fn = b'demo.txt'
470 fn = b'demo.txt'
471 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
471 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
472 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
472 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
473 if repo is None:
473 if repo is None:
474 baseui = ui
474 baseui = ui
475 else:
475 else:
476 baseui = repo.baseui
476 baseui = repo.baseui
477 repo = localrepo.instance(baseui, tmpdir, create=True)
477 repo = localrepo.instance(baseui, tmpdir, create=True)
478 ui.setconfig(b'keyword', fn, b'', b'keyword')
478 ui.setconfig(b'keyword', fn, b'', b'keyword')
479 svn = ui.configbool(b'keywordset', b'svn')
479 svn = ui.configbool(b'keywordset', b'svn')
480 # explicitly set keywordset for demo output
480 # explicitly set keywordset for demo output
481 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
481 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
482
482
483 uikwmaps = ui.configitems(b'keywordmaps')
483 uikwmaps = ui.configitems(b'keywordmaps')
484 if args or opts.get(r'rcfile'):
484 if args or opts.get(r'rcfile'):
485 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
485 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
486 if uikwmaps:
486 if uikwmaps:
487 ui.status(_(b'\textending current template maps\n'))
487 ui.status(_(b'\textending current template maps\n'))
488 if opts.get(r'default') or not uikwmaps:
488 if opts.get(r'default') or not uikwmaps:
489 if svn:
489 if svn:
490 ui.status(_(b'\toverriding default svn keywordset\n'))
490 ui.status(_(b'\toverriding default svn keywordset\n'))
491 else:
491 else:
492 ui.status(_(b'\toverriding default cvs keywordset\n'))
492 ui.status(_(b'\toverriding default cvs keywordset\n'))
493 if opts.get(r'rcfile'):
493 if opts.get(r'rcfile'):
494 ui.readconfig(opts.get(b'rcfile'))
494 ui.readconfig(opts.get(b'rcfile'))
495 if args:
495 if args:
496 # simulate hgrc parsing
496 # simulate hgrc parsing
497 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
497 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
498 repo.vfs.write(b'hgrc', rcmaps)
498 repo.vfs.write(b'hgrc', rcmaps)
499 ui.readconfig(repo.vfs.join(b'hgrc'))
499 ui.readconfig(repo.vfs.join(b'hgrc'))
500 kwmaps = dict(ui.configitems(b'keywordmaps'))
500 kwmaps = dict(ui.configitems(b'keywordmaps'))
501 elif opts.get(r'default'):
501 elif opts.get(r'default'):
502 if svn:
502 if svn:
503 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
503 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
504 else:
504 else:
505 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
505 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
506 kwmaps = _defaultkwmaps(ui)
506 kwmaps = _defaultkwmaps(ui)
507 if uikwmaps:
507 if uikwmaps:
508 ui.status(_(b'\tdisabling current template maps\n'))
508 ui.status(_(b'\tdisabling current template maps\n'))
509 for k, v in kwmaps.iteritems():
509 for k, v in kwmaps.iteritems():
510 ui.setconfig(b'keywordmaps', k, v, b'keyword')
510 ui.setconfig(b'keywordmaps', k, v, b'keyword')
511 else:
511 else:
512 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
512 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
513 if uikwmaps:
513 if uikwmaps:
514 kwmaps = dict(uikwmaps)
514 kwmaps = dict(uikwmaps)
515 else:
515 else:
516 kwmaps = _defaultkwmaps(ui)
516 kwmaps = _defaultkwmaps(ui)
517
517
518 uisetup(ui)
518 uisetup(ui)
519 reposetup(ui, repo)
519 reposetup(ui, repo)
520 ui.write(b'[extensions]\nkeyword =\n')
520 ui.writenoi18n(b'[extensions]\nkeyword =\n')
521 demoitems(b'keyword', ui.configitems(b'keyword'))
521 demoitems(b'keyword', ui.configitems(b'keyword'))
522 demoitems(b'keywordset', ui.configitems(b'keywordset'))
522 demoitems(b'keywordset', ui.configitems(b'keywordset'))
523 demoitems(b'keywordmaps', kwmaps.iteritems())
523 demoitems(b'keywordmaps', kwmaps.iteritems())
524 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
524 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
525 repo.wvfs.write(fn, keywords)
525 repo.wvfs.write(fn, keywords)
526 repo[None].add([fn])
526 repo[None].add([fn])
527 ui.note(_(b'\nkeywords written to %s:\n') % fn)
527 ui.note(_(b'\nkeywords written to %s:\n') % fn)
528 ui.note(keywords)
528 ui.note(keywords)
529 with repo.wlock():
529 with repo.wlock():
530 repo.dirstate.setbranch(b'demobranch')
530 repo.dirstate.setbranch(b'demobranch')
531 for name, cmd in ui.configitems(b'hooks'):
531 for name, cmd in ui.configitems(b'hooks'):
532 if name.split(b'.', 1)[0].find(b'commit') > -1:
532 if name.split(b'.', 1)[0].find(b'commit') > -1:
533 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
533 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
534 msg = _(b'hg keyword configuration and expansion example')
534 msg = _(b'hg keyword configuration and expansion example')
535 ui.note((b"hg ci -m '%s'\n" % msg))
535 ui.note((b"hg ci -m '%s'\n" % msg))
536 repo.commit(text=msg)
536 repo.commit(text=msg)
537 ui.status(_(b'\n\tkeywords expanded\n'))
537 ui.status(_(b'\n\tkeywords expanded\n'))
538 ui.write(repo.wread(fn))
538 ui.write(repo.wread(fn))
539 repo.wvfs.rmtree(repo.root)
539 repo.wvfs.rmtree(repo.root)
540
540
541
541
542 @command(
542 @command(
543 b'kwexpand',
543 b'kwexpand',
544 cmdutil.walkopts,
544 cmdutil.walkopts,
545 _(b'hg kwexpand [OPTION]... [FILE]...'),
545 _(b'hg kwexpand [OPTION]... [FILE]...'),
546 inferrepo=True,
546 inferrepo=True,
547 )
547 )
548 def expand(ui, repo, *pats, **opts):
548 def expand(ui, repo, *pats, **opts):
549 '''expand keywords in the working directory
549 '''expand keywords in the working directory
550
550
551 Run after (re)enabling keyword expansion.
551 Run after (re)enabling keyword expansion.
552
552
553 kwexpand refuses to run if given files contain local changes.
553 kwexpand refuses to run if given files contain local changes.
554 '''
554 '''
555 # 3rd argument sets expansion to True
555 # 3rd argument sets expansion to True
556 _kwfwrite(ui, repo, True, *pats, **opts)
556 _kwfwrite(ui, repo, True, *pats, **opts)
557
557
558
558
559 @command(
559 @command(
560 b'kwfiles',
560 b'kwfiles',
561 [
561 [
562 (b'A', b'all', None, _(b'show keyword status flags of all files')),
562 (b'A', b'all', None, _(b'show keyword status flags of all files')),
563 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
563 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
564 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
564 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
565 ]
565 ]
566 + cmdutil.walkopts,
566 + cmdutil.walkopts,
567 _(b'hg kwfiles [OPTION]... [FILE]...'),
567 _(b'hg kwfiles [OPTION]... [FILE]...'),
568 inferrepo=True,
568 inferrepo=True,
569 )
569 )
570 def files(ui, repo, *pats, **opts):
570 def files(ui, repo, *pats, **opts):
571 '''show files configured for keyword expansion
571 '''show files configured for keyword expansion
572
572
573 List which files in the working directory are matched by the
573 List which files in the working directory are matched by the
574 [keyword] configuration patterns.
574 [keyword] configuration patterns.
575
575
576 Useful to prevent inadvertent keyword expansion and to speed up
576 Useful to prevent inadvertent keyword expansion and to speed up
577 execution by including only files that are actual candidates for
577 execution by including only files that are actual candidates for
578 expansion.
578 expansion.
579
579
580 See :hg:`help keyword` on how to construct patterns both for
580 See :hg:`help keyword` on how to construct patterns both for
581 inclusion and exclusion of files.
581 inclusion and exclusion of files.
582
582
583 With -A/--all and -v/--verbose the codes used to show the status
583 With -A/--all and -v/--verbose the codes used to show the status
584 of files are::
584 of files are::
585
585
586 K = keyword expansion candidate
586 K = keyword expansion candidate
587 k = keyword expansion candidate (not tracked)
587 k = keyword expansion candidate (not tracked)
588 I = ignored
588 I = ignored
589 i = ignored (not tracked)
589 i = ignored (not tracked)
590 '''
590 '''
591 kwt = getattr(repo, '_keywordkwt', None)
591 kwt = getattr(repo, '_keywordkwt', None)
592 wctx = repo[None]
592 wctx = repo[None]
593 status = _status(ui, repo, wctx, kwt, *pats, **opts)
593 status = _status(ui, repo, wctx, kwt, *pats, **opts)
594 if pats:
594 if pats:
595 cwd = repo.getcwd()
595 cwd = repo.getcwd()
596 else:
596 else:
597 cwd = b''
597 cwd = b''
598 files = []
598 files = []
599 opts = pycompat.byteskwargs(opts)
599 opts = pycompat.byteskwargs(opts)
600 if not opts.get(b'unknown') or opts.get(b'all'):
600 if not opts.get(b'unknown') or opts.get(b'all'):
601 files = sorted(status.modified + status.added + status.clean)
601 files = sorted(status.modified + status.added + status.clean)
602 kwfiles = kwt.iskwfile(files, wctx)
602 kwfiles = kwt.iskwfile(files, wctx)
603 kwdeleted = kwt.iskwfile(status.deleted, wctx)
603 kwdeleted = kwt.iskwfile(status.deleted, wctx)
604 kwunknown = kwt.iskwfile(status.unknown, wctx)
604 kwunknown = kwt.iskwfile(status.unknown, wctx)
605 if not opts.get(b'ignore') or opts.get(b'all'):
605 if not opts.get(b'ignore') or opts.get(b'all'):
606 showfiles = kwfiles, kwdeleted, kwunknown
606 showfiles = kwfiles, kwdeleted, kwunknown
607 else:
607 else:
608 showfiles = [], [], []
608 showfiles = [], [], []
609 if opts.get(b'all') or opts.get(b'ignore'):
609 if opts.get(b'all') or opts.get(b'ignore'):
610 showfiles += (
610 showfiles += (
611 [f for f in files if f not in kwfiles],
611 [f for f in files if f not in kwfiles],
612 [f for f in status.unknown if f not in kwunknown],
612 [f for f in status.unknown if f not in kwunknown],
613 )
613 )
614 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
614 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
615 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
615 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
616 fm = ui.formatter(b'kwfiles', opts)
616 fm = ui.formatter(b'kwfiles', opts)
617 fmt = b'%.0s%s\n'
617 fmt = b'%.0s%s\n'
618 if opts.get(b'all') or ui.verbose:
618 if opts.get(b'all') or ui.verbose:
619 fmt = b'%s %s\n'
619 fmt = b'%s %s\n'
620 for kwstate, char, filenames in kwstates:
620 for kwstate, char, filenames in kwstates:
621 label = b'kwfiles.' + kwstate
621 label = b'kwfiles.' + kwstate
622 for f in filenames:
622 for f in filenames:
623 fm.startitem()
623 fm.startitem()
624 fm.data(kwstatus=char, path=f)
624 fm.data(kwstatus=char, path=f)
625 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
625 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
626 fm.end()
626 fm.end()
627
627
628
628
629 @command(
629 @command(
630 b'kwshrink',
630 b'kwshrink',
631 cmdutil.walkopts,
631 cmdutil.walkopts,
632 _(b'hg kwshrink [OPTION]... [FILE]...'),
632 _(b'hg kwshrink [OPTION]... [FILE]...'),
633 inferrepo=True,
633 inferrepo=True,
634 )
634 )
635 def shrink(ui, repo, *pats, **opts):
635 def shrink(ui, repo, *pats, **opts):
636 '''revert expanded keywords in the working directory
636 '''revert expanded keywords in the working directory
637
637
638 Must be run before changing/disabling active keywords.
638 Must be run before changing/disabling active keywords.
639
639
640 kwshrink refuses to run if given files contain local changes.
640 kwshrink refuses to run if given files contain local changes.
641 '''
641 '''
642 # 3rd argument sets expansion to False
642 # 3rd argument sets expansion to False
643 _kwfwrite(ui, repo, False, *pats, **opts)
643 _kwfwrite(ui, repo, False, *pats, **opts)
644
644
645
645
646 # monkeypatches
646 # monkeypatches
647
647
648
648
649 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
649 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
650 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
650 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
651 rejects or conflicts due to expanded keywords in working dir.'''
651 rejects or conflicts due to expanded keywords in working dir.'''
652 orig(self, ui, gp, backend, store, eolmode)
652 orig(self, ui, gp, backend, store, eolmode)
653 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
653 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
654 if kwt:
654 if kwt:
655 # shrink keywords read from working dir
655 # shrink keywords read from working dir
656 self.lines = kwt.shrinklines(self.fname, self.lines)
656 self.lines = kwt.shrinklines(self.fname, self.lines)
657
657
658
658
659 def kwdiff(orig, repo, *args, **kwargs):
659 def kwdiff(orig, repo, *args, **kwargs):
660 '''Monkeypatch patch.diff to avoid expansion.'''
660 '''Monkeypatch patch.diff to avoid expansion.'''
661 kwt = getattr(repo, '_keywordkwt', None)
661 kwt = getattr(repo, '_keywordkwt', None)
662 if kwt:
662 if kwt:
663 restrict = kwt.restrict
663 restrict = kwt.restrict
664 kwt.restrict = True
664 kwt.restrict = True
665 try:
665 try:
666 for chunk in orig(repo, *args, **kwargs):
666 for chunk in orig(repo, *args, **kwargs):
667 yield chunk
667 yield chunk
668 finally:
668 finally:
669 if kwt:
669 if kwt:
670 kwt.restrict = restrict
670 kwt.restrict = restrict
671
671
672
672
673 def kwweb_skip(orig, web):
673 def kwweb_skip(orig, web):
674 '''Wraps webcommands.x turning off keyword expansion.'''
674 '''Wraps webcommands.x turning off keyword expansion.'''
675 kwt = getattr(web.repo, '_keywordkwt', None)
675 kwt = getattr(web.repo, '_keywordkwt', None)
676 if kwt:
676 if kwt:
677 origmatch = kwt.match
677 origmatch = kwt.match
678 kwt.match = util.never
678 kwt.match = util.never
679 try:
679 try:
680 for chunk in orig(web):
680 for chunk in orig(web):
681 yield chunk
681 yield chunk
682 finally:
682 finally:
683 if kwt:
683 if kwt:
684 kwt.match = origmatch
684 kwt.match = origmatch
685
685
686
686
687 def kw_amend(orig, ui, repo, old, extra, pats, opts):
687 def kw_amend(orig, ui, repo, old, extra, pats, opts):
688 '''Wraps cmdutil.amend expanding keywords after amend.'''
688 '''Wraps cmdutil.amend expanding keywords after amend.'''
689 kwt = getattr(repo, '_keywordkwt', None)
689 kwt = getattr(repo, '_keywordkwt', None)
690 if kwt is None:
690 if kwt is None:
691 return orig(ui, repo, old, extra, pats, opts)
691 return orig(ui, repo, old, extra, pats, opts)
692 with repo.wlock():
692 with repo.wlock():
693 kwt.postcommit = True
693 kwt.postcommit = True
694 newid = orig(ui, repo, old, extra, pats, opts)
694 newid = orig(ui, repo, old, extra, pats, opts)
695 if newid != old.node():
695 if newid != old.node():
696 ctx = repo[newid]
696 ctx = repo[newid]
697 kwt.restrict = True
697 kwt.restrict = True
698 kwt.overwrite(ctx, ctx.files(), False, True)
698 kwt.overwrite(ctx, ctx.files(), False, True)
699 kwt.restrict = False
699 kwt.restrict = False
700 return newid
700 return newid
701
701
702
702
703 def kw_copy(orig, ui, repo, pats, opts, rename=False):
703 def kw_copy(orig, ui, repo, pats, opts, rename=False):
704 '''Wraps cmdutil.copy so that copy/rename destinations do not
704 '''Wraps cmdutil.copy so that copy/rename destinations do not
705 contain expanded keywords.
705 contain expanded keywords.
706 Note that the source of a regular file destination may also be a
706 Note that the source of a regular file destination may also be a
707 symlink:
707 symlink:
708 hg cp sym x -> x is symlink
708 hg cp sym x -> x is symlink
709 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
709 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
710 For the latter we have to follow the symlink to find out whether its
710 For the latter we have to follow the symlink to find out whether its
711 target is configured for expansion and we therefore must unexpand the
711 target is configured for expansion and we therefore must unexpand the
712 keywords in the destination.'''
712 keywords in the destination.'''
713 kwt = getattr(repo, '_keywordkwt', None)
713 kwt = getattr(repo, '_keywordkwt', None)
714 if kwt is None:
714 if kwt is None:
715 return orig(ui, repo, pats, opts, rename)
715 return orig(ui, repo, pats, opts, rename)
716 with repo.wlock():
716 with repo.wlock():
717 orig(ui, repo, pats, opts, rename)
717 orig(ui, repo, pats, opts, rename)
718 if opts.get(b'dry_run'):
718 if opts.get(b'dry_run'):
719 return
719 return
720 wctx = repo[None]
720 wctx = repo[None]
721 cwd = repo.getcwd()
721 cwd = repo.getcwd()
722
722
723 def haskwsource(dest):
723 def haskwsource(dest):
724 '''Returns true if dest is a regular file and configured for
724 '''Returns true if dest is a regular file and configured for
725 expansion or a symlink which points to a file configured for
725 expansion or a symlink which points to a file configured for
726 expansion. '''
726 expansion. '''
727 source = repo.dirstate.copied(dest)
727 source = repo.dirstate.copied(dest)
728 if b'l' in wctx.flags(source):
728 if b'l' in wctx.flags(source):
729 source = pathutil.canonpath(
729 source = pathutil.canonpath(
730 repo.root, cwd, os.path.realpath(source)
730 repo.root, cwd, os.path.realpath(source)
731 )
731 )
732 return kwt.match(source)
732 return kwt.match(source)
733
733
734 candidates = [
734 candidates = [
735 f
735 f
736 for f in repo.dirstate.copies()
736 for f in repo.dirstate.copies()
737 if b'l' not in wctx.flags(f) and haskwsource(f)
737 if b'l' not in wctx.flags(f) and haskwsource(f)
738 ]
738 ]
739 kwt.overwrite(wctx, candidates, False, False)
739 kwt.overwrite(wctx, candidates, False, False)
740
740
741
741
742 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
742 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
743 '''Wraps record.dorecord expanding keywords after recording.'''
743 '''Wraps record.dorecord expanding keywords after recording.'''
744 kwt = getattr(repo, '_keywordkwt', None)
744 kwt = getattr(repo, '_keywordkwt', None)
745 if kwt is None:
745 if kwt is None:
746 return orig(ui, repo, commitfunc, *pats, **opts)
746 return orig(ui, repo, commitfunc, *pats, **opts)
747 with repo.wlock():
747 with repo.wlock():
748 # record returns 0 even when nothing has changed
748 # record returns 0 even when nothing has changed
749 # therefore compare nodes before and after
749 # therefore compare nodes before and after
750 kwt.postcommit = True
750 kwt.postcommit = True
751 ctx = repo[b'.']
751 ctx = repo[b'.']
752 wstatus = ctx.status()
752 wstatus = ctx.status()
753 ret = orig(ui, repo, commitfunc, *pats, **opts)
753 ret = orig(ui, repo, commitfunc, *pats, **opts)
754 recctx = repo[b'.']
754 recctx = repo[b'.']
755 if ctx != recctx:
755 if ctx != recctx:
756 modified, added = _preselect(wstatus, recctx.files())
756 modified, added = _preselect(wstatus, recctx.files())
757 kwt.restrict = False
757 kwt.restrict = False
758 kwt.overwrite(recctx, modified, False, True)
758 kwt.overwrite(recctx, modified, False, True)
759 kwt.overwrite(recctx, added, False, True, True)
759 kwt.overwrite(recctx, added, False, True, True)
760 kwt.restrict = True
760 kwt.restrict = True
761 return ret
761 return ret
762
762
763
763
764 def kwfilectx_cmp(orig, self, fctx):
764 def kwfilectx_cmp(orig, self, fctx):
765 if fctx._customcmp:
765 if fctx._customcmp:
766 return fctx.cmp(self)
766 return fctx.cmp(self)
767 kwt = getattr(self._repo, '_keywordkwt', None)
767 kwt = getattr(self._repo, '_keywordkwt', None)
768 if kwt is None:
768 if kwt is None:
769 return orig(self, fctx)
769 return orig(self, fctx)
770 # keyword affects data size, comparing wdir and filelog size does
770 # keyword affects data size, comparing wdir and filelog size does
771 # not make sense
771 # not make sense
772 if (
772 if (
773 fctx._filenode is None
773 fctx._filenode is None
774 and (
774 and (
775 self._repo._encodefilterpats
775 self._repo._encodefilterpats
776 or kwt.match(fctx.path())
776 or kwt.match(fctx.path())
777 and b'l' not in fctx.flags()
777 and b'l' not in fctx.flags()
778 or self.size() - 4 == fctx.size()
778 or self.size() - 4 == fctx.size()
779 )
779 )
780 or self.size() == fctx.size()
780 or self.size() == fctx.size()
781 ):
781 ):
782 return self._filelog.cmp(self._filenode, fctx.data())
782 return self._filelog.cmp(self._filenode, fctx.data())
783 return True
783 return True
784
784
785
785
786 def uisetup(ui):
786 def uisetup(ui):
787 ''' Monkeypatches dispatch._parse to retrieve user command.
787 ''' Monkeypatches dispatch._parse to retrieve user command.
788 Overrides file method to return kwfilelog instead of filelog
788 Overrides file method to return kwfilelog instead of filelog
789 if file matches user configuration.
789 if file matches user configuration.
790 Wraps commit to overwrite configured files with updated
790 Wraps commit to overwrite configured files with updated
791 keyword substitutions.
791 keyword substitutions.
792 Monkeypatches patch and webcommands.'''
792 Monkeypatches patch and webcommands.'''
793
793
794 def kwdispatch_parse(orig, ui, args):
794 def kwdispatch_parse(orig, ui, args):
795 '''Monkeypatch dispatch._parse to obtain running hg command.'''
795 '''Monkeypatch dispatch._parse to obtain running hg command.'''
796 cmd, func, args, options, cmdoptions = orig(ui, args)
796 cmd, func, args, options, cmdoptions = orig(ui, args)
797 kwtools[b'hgcmd'] = cmd
797 kwtools[b'hgcmd'] = cmd
798 return cmd, func, args, options, cmdoptions
798 return cmd, func, args, options, cmdoptions
799
799
800 extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
800 extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
801
801
802 extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
802 extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
803 extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
803 extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
804 extensions.wrapfunction(patch, b'diff', kwdiff)
804 extensions.wrapfunction(patch, b'diff', kwdiff)
805 extensions.wrapfunction(cmdutil, b'amend', kw_amend)
805 extensions.wrapfunction(cmdutil, b'amend', kw_amend)
806 extensions.wrapfunction(cmdutil, b'copy', kw_copy)
806 extensions.wrapfunction(cmdutil, b'copy', kw_copy)
807 extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
807 extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
808 for c in nokwwebcommands.split():
808 for c in nokwwebcommands.split():
809 extensions.wrapfunction(webcommands, c, kwweb_skip)
809 extensions.wrapfunction(webcommands, c, kwweb_skip)
810
810
811
811
812 def reposetup(ui, repo):
812 def reposetup(ui, repo):
813 '''Sets up repo as kwrepo for keyword substitution.'''
813 '''Sets up repo as kwrepo for keyword substitution.'''
814
814
815 try:
815 try:
816 if (
816 if (
817 not repo.local()
817 not repo.local()
818 or kwtools[b'hgcmd'] in nokwcommands.split()
818 or kwtools[b'hgcmd'] in nokwcommands.split()
819 or b'.hg' in util.splitpath(repo.root)
819 or b'.hg' in util.splitpath(repo.root)
820 or repo._url.startswith(b'bundle:')
820 or repo._url.startswith(b'bundle:')
821 ):
821 ):
822 return
822 return
823 except AttributeError:
823 except AttributeError:
824 pass
824 pass
825
825
826 inc, exc = [], [b'.hg*']
826 inc, exc = [], [b'.hg*']
827 for pat, opt in ui.configitems(b'keyword'):
827 for pat, opt in ui.configitems(b'keyword'):
828 if opt != b'ignore':
828 if opt != b'ignore':
829 inc.append(pat)
829 inc.append(pat)
830 else:
830 else:
831 exc.append(pat)
831 exc.append(pat)
832 if not inc:
832 if not inc:
833 return
833 return
834
834
835 kwt = kwtemplater(ui, repo, inc, exc)
835 kwt = kwtemplater(ui, repo, inc, exc)
836
836
837 class kwrepo(repo.__class__):
837 class kwrepo(repo.__class__):
838 def file(self, f):
838 def file(self, f):
839 if f[0] == b'/':
839 if f[0] == b'/':
840 f = f[1:]
840 f = f[1:]
841 return kwfilelog(self.svfs, kwt, f)
841 return kwfilelog(self.svfs, kwt, f)
842
842
843 def wread(self, filename):
843 def wread(self, filename):
844 data = super(kwrepo, self).wread(filename)
844 data = super(kwrepo, self).wread(filename)
845 return kwt.wread(filename, data)
845 return kwt.wread(filename, data)
846
846
847 def commit(self, *args, **opts):
847 def commit(self, *args, **opts):
848 # use custom commitctx for user commands
848 # use custom commitctx for user commands
849 # other extensions can still wrap repo.commitctx directly
849 # other extensions can still wrap repo.commitctx directly
850 self.commitctx = self.kwcommitctx
850 self.commitctx = self.kwcommitctx
851 try:
851 try:
852 return super(kwrepo, self).commit(*args, **opts)
852 return super(kwrepo, self).commit(*args, **opts)
853 finally:
853 finally:
854 del self.commitctx
854 del self.commitctx
855
855
856 def kwcommitctx(self, ctx, error=False, origctx=None):
856 def kwcommitctx(self, ctx, error=False, origctx=None):
857 n = super(kwrepo, self).commitctx(ctx, error, origctx)
857 n = super(kwrepo, self).commitctx(ctx, error, origctx)
858 # no lock needed, only called from repo.commit() which already locks
858 # no lock needed, only called from repo.commit() which already locks
859 if not kwt.postcommit:
859 if not kwt.postcommit:
860 restrict = kwt.restrict
860 restrict = kwt.restrict
861 kwt.restrict = True
861 kwt.restrict = True
862 kwt.overwrite(
862 kwt.overwrite(
863 self[n], sorted(ctx.added() + ctx.modified()), False, True
863 self[n], sorted(ctx.added() + ctx.modified()), False, True
864 )
864 )
865 kwt.restrict = restrict
865 kwt.restrict = restrict
866 return n
866 return n
867
867
868 def rollback(self, dryrun=False, force=False):
868 def rollback(self, dryrun=False, force=False):
869 with self.wlock():
869 with self.wlock():
870 origrestrict = kwt.restrict
870 origrestrict = kwt.restrict
871 try:
871 try:
872 if not dryrun:
872 if not dryrun:
873 changed = self[b'.'].files()
873 changed = self[b'.'].files()
874 ret = super(kwrepo, self).rollback(dryrun, force)
874 ret = super(kwrepo, self).rollback(dryrun, force)
875 if not dryrun:
875 if not dryrun:
876 ctx = self[b'.']
876 ctx = self[b'.']
877 modified, added = _preselect(ctx.status(), changed)
877 modified, added = _preselect(ctx.status(), changed)
878 kwt.restrict = False
878 kwt.restrict = False
879 kwt.overwrite(ctx, modified, True, True)
879 kwt.overwrite(ctx, modified, True, True)
880 kwt.overwrite(ctx, added, True, False)
880 kwt.overwrite(ctx, added, True, False)
881 return ret
881 return ret
882 finally:
882 finally:
883 kwt.restrict = origrestrict
883 kwt.restrict = origrestrict
884
884
885 repo.__class__ = kwrepo
885 repo.__class__ = kwrepo
886 repo._keywordkwt = kwt
886 repo._keywordkwt = kwt
@@ -1,1256 +1,1256 b''
1 # phabricator.py - simple Phabricator integration
1 # phabricator.py - simple Phabricator integration
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """simple Phabricator integration (EXPERIMENTAL)
7 """simple Phabricator integration (EXPERIMENTAL)
8
8
9 This extension provides a ``phabsend`` command, which sends a stack of
9 This extension provides a ``phabsend`` command, which sends a stack of
10 changesets to Phabricator; a ``phabread`` command, which prints a stack of
10 changesets to Phabricator; a ``phabread`` command, which prints a stack of
11 revisions in a format suitable for :hg:`import`; and a ``phabupdate`` command
11 revisions in a format suitable for :hg:`import`; and a ``phabupdate`` command
12 to update statuses in batch.
12 to update statuses in batch.
13
13
14 By default, Phabricator requires a ``Test Plan``, which might prevent some
14 By default, Phabricator requires a ``Test Plan``, which might prevent some
15 changesets from being sent. The requirement can be disabled by changing the
15 changesets from being sent. The requirement can be disabled by changing the
16 ``differential.require-test-plan-field`` config server-side.
16 ``differential.require-test-plan-field`` config server-side.
17
17
18 Config::
18 Config::
19
19
20 [phabricator]
20 [phabricator]
21 # Phabricator URL
21 # Phabricator URL
22 url = https://phab.example.com/
22 url = https://phab.example.com/
23
23
24 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
24 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
25 # callsign is "FOO".
25 # callsign is "FOO".
26 callsign = FOO
26 callsign = FOO
27
27
28 # curl command to use. If not set (default), use builtin HTTP library to
28 # curl command to use. If not set (default), use builtin HTTP library to
29 # communicate. If set, use the specified curl command. This could be useful
29 # communicate. If set, use the specified curl command. This could be useful
30 # if you need to specify advanced options that are not easily supported by
30 # if you need to specify advanced options that are not easily supported by
31 # the internal library.
31 # the internal library.
32 curlcmd = curl --connect-timeout 2 --retry 3 --silent
32 curlcmd = curl --connect-timeout 2 --retry 3 --silent
33
33
34 [auth]
34 [auth]
35 example.schemes = https
35 example.schemes = https
36 example.prefix = phab.example.com
36 example.prefix = phab.example.com
37
37
38 # API token. Get it from https://$HOST/conduit/login/
38 # API token. Get it from https://$HOST/conduit/login/
39 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
39 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
40 """
40 """
41
41
42 from __future__ import absolute_import
42 from __future__ import absolute_import
43
43
44 import contextlib
44 import contextlib
45 import itertools
45 import itertools
46 import json
46 import json
47 import operator
47 import operator
48 import re
48 import re
49
49
50 from mercurial.node import bin, nullid
50 from mercurial.node import bin, nullid
51 from mercurial.i18n import _
51 from mercurial.i18n import _
52 from mercurial import (
52 from mercurial import (
53 cmdutil,
53 cmdutil,
54 context,
54 context,
55 encoding,
55 encoding,
56 error,
56 error,
57 exthelper,
57 exthelper,
58 httpconnection as httpconnectionmod,
58 httpconnection as httpconnectionmod,
59 mdiff,
59 mdiff,
60 obsutil,
60 obsutil,
61 parser,
61 parser,
62 patch,
62 patch,
63 phases,
63 phases,
64 pycompat,
64 pycompat,
65 scmutil,
65 scmutil,
66 smartset,
66 smartset,
67 tags,
67 tags,
68 templatefilters,
68 templatefilters,
69 templateutil,
69 templateutil,
70 url as urlmod,
70 url as urlmod,
71 util,
71 util,
72 )
72 )
73 from mercurial.utils import (
73 from mercurial.utils import (
74 procutil,
74 procutil,
75 stringutil,
75 stringutil,
76 )
76 )
77
77
78 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
78 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
79 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
79 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
80 # be specifying the version(s) of Mercurial they are tested with, or
80 # be specifying the version(s) of Mercurial they are tested with, or
81 # leave the attribute unspecified.
81 # leave the attribute unspecified.
82 testedwith = b'ships-with-hg-core'
82 testedwith = b'ships-with-hg-core'
83
83
84 eh = exthelper.exthelper()
84 eh = exthelper.exthelper()
85
85
86 cmdtable = eh.cmdtable
86 cmdtable = eh.cmdtable
87 command = eh.command
87 command = eh.command
88 configtable = eh.configtable
88 configtable = eh.configtable
89 templatekeyword = eh.templatekeyword
89 templatekeyword = eh.templatekeyword
90
90
91 # developer config: phabricator.batchsize
91 # developer config: phabricator.batchsize
92 eh.configitem(
92 eh.configitem(
93 b'phabricator', b'batchsize', default=12,
93 b'phabricator', b'batchsize', default=12,
94 )
94 )
95 eh.configitem(
95 eh.configitem(
96 b'phabricator', b'callsign', default=None,
96 b'phabricator', b'callsign', default=None,
97 )
97 )
98 eh.configitem(
98 eh.configitem(
99 b'phabricator', b'curlcmd', default=None,
99 b'phabricator', b'curlcmd', default=None,
100 )
100 )
101 # developer config: phabricator.repophid
101 # developer config: phabricator.repophid
102 eh.configitem(
102 eh.configitem(
103 b'phabricator', b'repophid', default=None,
103 b'phabricator', b'repophid', default=None,
104 )
104 )
105 eh.configitem(
105 eh.configitem(
106 b'phabricator', b'url', default=None,
106 b'phabricator', b'url', default=None,
107 )
107 )
108 eh.configitem(
108 eh.configitem(
109 b'phabsend', b'confirm', default=False,
109 b'phabsend', b'confirm', default=False,
110 )
110 )
111
111
112 colortable = {
112 colortable = {
113 b'phabricator.action.created': b'green',
113 b'phabricator.action.created': b'green',
114 b'phabricator.action.skipped': b'magenta',
114 b'phabricator.action.skipped': b'magenta',
115 b'phabricator.action.updated': b'magenta',
115 b'phabricator.action.updated': b'magenta',
116 b'phabricator.desc': b'',
116 b'phabricator.desc': b'',
117 b'phabricator.drev': b'bold',
117 b'phabricator.drev': b'bold',
118 b'phabricator.node': b'',
118 b'phabricator.node': b'',
119 }
119 }
120
120
121 _VCR_FLAGS = [
121 _VCR_FLAGS = [
122 (
122 (
123 b'',
123 b'',
124 b'test-vcr',
124 b'test-vcr',
125 b'',
125 b'',
126 _(
126 _(
127 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
127 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
128 b', otherwise will mock all http requests using the specified vcr file.'
128 b', otherwise will mock all http requests using the specified vcr file.'
129 b' (ADVANCED)'
129 b' (ADVANCED)'
130 ),
130 ),
131 ),
131 ),
132 ]
132 ]
133
133
134
134
135 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
135 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
136 fullflags = flags + _VCR_FLAGS
136 fullflags = flags + _VCR_FLAGS
137
137
138 def hgmatcher(r1, r2):
138 def hgmatcher(r1, r2):
139 if r1.uri != r2.uri or r1.method != r2.method:
139 if r1.uri != r2.uri or r1.method != r2.method:
140 return False
140 return False
141 r1params = r1.body.split(b'&')
141 r1params = r1.body.split(b'&')
142 r2params = r2.body.split(b'&')
142 r2params = r2.body.split(b'&')
143 return set(r1params) == set(r2params)
143 return set(r1params) == set(r2params)
144
144
145 def sanitiserequest(request):
145 def sanitiserequest(request):
146 request.body = re.sub(
146 request.body = re.sub(
147 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
147 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
148 )
148 )
149 return request
149 return request
150
150
151 def sanitiseresponse(response):
151 def sanitiseresponse(response):
152 if r'set-cookie' in response[r'headers']:
152 if r'set-cookie' in response[r'headers']:
153 del response[r'headers'][r'set-cookie']
153 del response[r'headers'][r'set-cookie']
154 return response
154 return response
155
155
156 def decorate(fn):
156 def decorate(fn):
157 def inner(*args, **kwargs):
157 def inner(*args, **kwargs):
158 cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
158 cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
159 if cassette:
159 if cassette:
160 import hgdemandimport
160 import hgdemandimport
161
161
162 with hgdemandimport.deactivated():
162 with hgdemandimport.deactivated():
163 import vcr as vcrmod
163 import vcr as vcrmod
164 import vcr.stubs as stubs
164 import vcr.stubs as stubs
165
165
166 vcr = vcrmod.VCR(
166 vcr = vcrmod.VCR(
167 serializer=r'json',
167 serializer=r'json',
168 before_record_request=sanitiserequest,
168 before_record_request=sanitiserequest,
169 before_record_response=sanitiseresponse,
169 before_record_response=sanitiseresponse,
170 custom_patches=[
170 custom_patches=[
171 (
171 (
172 urlmod,
172 urlmod,
173 r'httpconnection',
173 r'httpconnection',
174 stubs.VCRHTTPConnection,
174 stubs.VCRHTTPConnection,
175 ),
175 ),
176 (
176 (
177 urlmod,
177 urlmod,
178 r'httpsconnection',
178 r'httpsconnection',
179 stubs.VCRHTTPSConnection,
179 stubs.VCRHTTPSConnection,
180 ),
180 ),
181 ],
181 ],
182 )
182 )
183 vcr.register_matcher(r'hgmatcher', hgmatcher)
183 vcr.register_matcher(r'hgmatcher', hgmatcher)
184 with vcr.use_cassette(cassette, match_on=[r'hgmatcher']):
184 with vcr.use_cassette(cassette, match_on=[r'hgmatcher']):
185 return fn(*args, **kwargs)
185 return fn(*args, **kwargs)
186 return fn(*args, **kwargs)
186 return fn(*args, **kwargs)
187
187
188 inner.__name__ = fn.__name__
188 inner.__name__ = fn.__name__
189 inner.__doc__ = fn.__doc__
189 inner.__doc__ = fn.__doc__
190 return command(
190 return command(
191 name,
191 name,
192 fullflags,
192 fullflags,
193 spec,
193 spec,
194 helpcategory=helpcategory,
194 helpcategory=helpcategory,
195 optionalrepo=optionalrepo,
195 optionalrepo=optionalrepo,
196 )(inner)
196 )(inner)
197
197
198 return decorate
198 return decorate
199
199
200
200
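A hedged sketch of how this decorator is used -- it behaves like ``@command``
but adds the ``--test-vcr`` flag for recording or replaying HTTP traffic (the
command name below is hypothetical):

  @vcrcommand(b'debugexample', [], _(b'hg debugexample'), optionalrepo=True)
  def debugexample(ui, repo):
      # when invoked with --test-vcr PATH, all Conduit HTTP traffic is
      # recorded to (or mocked from) PATH
      ui.write(b'done\n')
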
201 def urlencodenested(params):
201 def urlencodenested(params):
202 """like urlencode, but works with nested parameters.
202 """like urlencode, but works with nested parameters.
203
203
204 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
204 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
205 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
205 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
206 urlencode. Note: the encoding is consistent with PHP's http_build_query.
206 urlencode. Note: the encoding is consistent with PHP's http_build_query.
207 """
207 """
208 flatparams = util.sortdict()
208 flatparams = util.sortdict()
209
209
210 def process(prefix, obj):
210 def process(prefix, obj):
211 if isinstance(obj, bool):
211 if isinstance(obj, bool):
212 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
212 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
213 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
213 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
214 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
214 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
215 if items is None:
215 if items is None:
216 flatparams[prefix] = obj
216 flatparams[prefix] = obj
217 else:
217 else:
218 for k, v in items(obj):
218 for k, v in items(obj):
219 if prefix:
219 if prefix:
220 process(b'%s[%s]' % (prefix, k), v)
220 process(b'%s[%s]' % (prefix, k), v)
221 else:
221 else:
222 process(k, v)
222 process(k, v)
223
223
224 process(b'', params)
224 process(b'', params)
225 return util.urlreq.urlencode(flatparams)
225 return util.urlreq.urlencode(flatparams)
226
226
227
227
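A small illustration of the flattening described in the docstring (encoded
spelling approximate; urlencode percent-encodes the brackets):

  params = {b'a': [b'b', b'c'], b'd': {b'e': b'f'}}
  urlencodenested(params)
  # -> 'a%5B0%5D=b&a%5B1%5D=c&d%5Be%5D=f'
  # i.e. a[0]=b&a[1]=c&d[e]=f, matching PHP's http_build_query
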
228 def readurltoken(ui):
228 def readurltoken(ui):
229 """return conduit url, token and make sure they exist
229 """return conduit url, token and make sure they exist
230
230
231 Currently read from [auth] config section. In the future, it might
231 Currently read from [auth] config section. In the future, it might
232 make sense to read from .arcconfig and .arcrc as well.
232 make sense to read from .arcconfig and .arcrc as well.
233 """
233 """
234 url = ui.config(b'phabricator', b'url')
234 url = ui.config(b'phabricator', b'url')
235 if not url:
235 if not url:
236 raise error.Abort(
236 raise error.Abort(
237 _(b'config %s.%s is required') % (b'phabricator', b'url')
237 _(b'config %s.%s is required') % (b'phabricator', b'url')
238 )
238 )
239
239
240 res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
240 res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
241 token = None
241 token = None
242
242
243 if res:
243 if res:
244 group, auth = res
244 group, auth = res
245
245
246 ui.debug(b"using auth.%s.* for authentication\n" % group)
246 ui.debug(b"using auth.%s.* for authentication\n" % group)
247
247
248 token = auth.get(b'phabtoken')
248 token = auth.get(b'phabtoken')
249
249
250 if not token:
250 if not token:
251 raise error.Abort(
251 raise error.Abort(
252 _(b'Can\'t find conduit token associated with %s') % (url,)
252 _(b'Can\'t find conduit token associated with %s') % (url,)
253 )
253 )
254
254
255 return url, token
255 return url, token
256
256
257
257
258 def callconduit(ui, name, params):
258 def callconduit(ui, name, params):
259 """call Conduit API, params is a dict. return json.loads result, or None"""
259 """call Conduit API, params is a dict. return json.loads result, or None"""
260 host, token = readurltoken(ui)
260 host, token = readurltoken(ui)
261 url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
261 url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
262 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
262 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
263 params = params.copy()
263 params = params.copy()
264 params[b'api.token'] = token
264 params[b'api.token'] = token
265 data = urlencodenested(params)
265 data = urlencodenested(params)
266 curlcmd = ui.config(b'phabricator', b'curlcmd')
266 curlcmd = ui.config(b'phabricator', b'curlcmd')
267 if curlcmd:
267 if curlcmd:
268 sin, sout = procutil.popen2(
268 sin, sout = procutil.popen2(
269 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
269 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
270 )
270 )
271 sin.write(data)
271 sin.write(data)
272 sin.close()
272 sin.close()
273 body = sout.read()
273 body = sout.read()
274 else:
274 else:
275 urlopener = urlmod.opener(ui, authinfo)
275 urlopener = urlmod.opener(ui, authinfo)
276 request = util.urlreq.request(pycompat.strurl(url), data=data)
276 request = util.urlreq.request(pycompat.strurl(url), data=data)
277 with contextlib.closing(urlopener.open(request)) as rsp:
277 with contextlib.closing(urlopener.open(request)) as rsp:
278 body = rsp.read()
278 body = rsp.read()
279 ui.debug(b'Conduit Response: %s\n' % body)
279 ui.debug(b'Conduit Response: %s\n' % body)
280 parsed = pycompat.rapply(
280 parsed = pycompat.rapply(
281 lambda x: encoding.unitolocal(x)
281 lambda x: encoding.unitolocal(x)
282 if isinstance(x, pycompat.unicode)
282 if isinstance(x, pycompat.unicode)
283 else x,
283 else x,
284 # json.loads only accepts bytes from py3.6+
284 # json.loads only accepts bytes from py3.6+
285 json.loads(encoding.unifromlocal(body)),
285 json.loads(encoding.unifromlocal(body)),
286 )
286 )
287 if parsed.get(b'error_code'):
287 if parsed.get(b'error_code'):
288 msg = _(b'Conduit Error (%s): %s') % (
288 msg = _(b'Conduit Error (%s): %s') % (
289 parsed[b'error_code'],
289 parsed[b'error_code'],
290 parsed[b'error_info'],
290 parsed[b'error_info'],
291 )
291 )
292 raise error.Abort(msg)
292 raise error.Abort(msg)
293 return parsed[b'result']
293 return parsed[b'result']
294
294
295
295
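A hedged usage sketch (``user.whoami`` is a standard Conduit method; the
fields in the result depend on the server):

  # returns the parsed 'result' payload with bytes keys, or raises Abort
  # when the server reports an error_code
  whoami = callconduit(ui, b'user.whoami', {})
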
296 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
296 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
297 def debugcallconduit(ui, repo, name):
297 def debugcallconduit(ui, repo, name):
298 """call Conduit API
298 """call Conduit API
299
299
300 Call parameters are read from stdin as a JSON blob. Result will be written
300 Call parameters are read from stdin as a JSON blob. Result will be written
301 to stdout as a JSON blob.
301 to stdout as a JSON blob.
302 """
302 """
303 # json.loads only accepts bytes from 3.6+
303 # json.loads only accepts bytes from 3.6+
304 rawparams = encoding.unifromlocal(ui.fin.read())
304 rawparams = encoding.unifromlocal(ui.fin.read())
305 # json.loads only returns unicode strings
305 # json.loads only returns unicode strings
306 params = pycompat.rapply(
306 params = pycompat.rapply(
307 lambda x: encoding.unitolocal(x)
307 lambda x: encoding.unitolocal(x)
308 if isinstance(x, pycompat.unicode)
308 if isinstance(x, pycompat.unicode)
309 else x,
309 else x,
310 json.loads(rawparams),
310 json.loads(rawparams),
311 )
311 )
312 # json.dumps only accepts unicode strings
312 # json.dumps only accepts unicode strings
313 result = pycompat.rapply(
313 result = pycompat.rapply(
314 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
314 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
315 callconduit(ui, name, params),
315 callconduit(ui, name, params),
316 )
316 )
317 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
317 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
318 ui.write(b'%s\n' % encoding.unitolocal(s))
318 ui.write(b'%s\n' % encoding.unitolocal(s))
319
319
320
320
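For example, assuming ``phabricator.url`` and an API token are configured,
something like ``echo '{}' | hg debugcallconduit user.whoami`` should print
the JSON result to stdout.
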
321 def getrepophid(repo):
321 def getrepophid(repo):
322 """given callsign, return repository PHID or None"""
322 """given callsign, return repository PHID or None"""
323 # developer config: phabricator.repophid
323 # developer config: phabricator.repophid
324 repophid = repo.ui.config(b'phabricator', b'repophid')
324 repophid = repo.ui.config(b'phabricator', b'repophid')
325 if repophid:
325 if repophid:
326 return repophid
326 return repophid
327 callsign = repo.ui.config(b'phabricator', b'callsign')
327 callsign = repo.ui.config(b'phabricator', b'callsign')
328 if not callsign:
328 if not callsign:
329 return None
329 return None
330 query = callconduit(
330 query = callconduit(
331 repo.ui,
331 repo.ui,
332 b'diffusion.repository.search',
332 b'diffusion.repository.search',
333 {b'constraints': {b'callsigns': [callsign]}},
333 {b'constraints': {b'callsigns': [callsign]}},
334 )
334 )
335 if len(query[b'data']) == 0:
335 if len(query[b'data']) == 0:
336 return None
336 return None
337 repophid = query[b'data'][0][b'phid']
337 repophid = query[b'data'][0][b'phid']
338 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
338 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
339 return repophid
339 return repophid
340
340
341
341
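A minimal configuration sketch for this lookup (callsign and PHID
hypothetical):

  [phabricator]
  url = https://phab.example.com/
  callsign = FOO
  # developer config; skips the diffusion.repository.search round trip:
  # repophid = PHID-REPO-xxxxxxxxxxxx
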
342 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
342 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
343 _differentialrevisiondescre = re.compile(
343 _differentialrevisiondescre = re.compile(
344 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
344 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
345 )
345 )
346
346
347
347
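A quick illustration of what these patterns match (values hypothetical):

  m = _differentialrevisiondescre.search(
      b'Differential Revision: https://phab.example.com/D1234'
  )
  # m.group(r'url') -> b'https://phab.example.com/D1234'
  # m.group(r'id') -> b'1234'
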
348 def getoldnodedrevmap(repo, nodelist):
348 def getoldnodedrevmap(repo, nodelist):
349 """find previous nodes that has been sent to Phabricator
349 """find previous nodes that has been sent to Phabricator
350
350
351 return {node: (oldnode, Differential diff, Differential Revision ID)}
351 return {node: (oldnode, Differential diff, Differential Revision ID)}
352 for node in nodelist with known previous sent versions, or associated
352 for node in nodelist with known previous sent versions, or associated
353 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
353 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
354 be ``None``.
354 be ``None``.
355
355
356 Examines commit messages like "Differential Revision:" to get the
356 Examines commit messages like "Differential Revision:" to get the
357 association information.
357 association information.
358
358
359 If no such commit message line is found, examine all precursors and their
359 If no such commit message line is found, examine all precursors and their
360 tags. Tags with a format like "D1234" are considered a match; the node
360 tags. Tags with a format like "D1234" are considered a match; the node
361 with that tag and the number after "D" (e.g. 1234) will be returned.
361 with that tag and the number after "D" (e.g. 1234) will be returned.
362
362
363 The ``old node``, if not None, is guaranteed to be the node of the last
363 The ``old node``, if not None, is guaranteed to be the node of the last
364 diff of the corresponding Differential Revision, and to exist in the repo.
364 diff of the corresponding Differential Revision, and to exist in the repo.
365 """
365 """
366 unfi = repo.unfiltered()
366 unfi = repo.unfiltered()
367 nodemap = unfi.changelog.nodemap
367 nodemap = unfi.changelog.nodemap
368
368
369 result = {} # {node: (oldnode?, lastdiff?, drev)}
369 result = {} # {node: (oldnode?, lastdiff?, drev)}
370 toconfirm = {} # {node: (force, {precnode}, drev)}
370 toconfirm = {} # {node: (force, {precnode}, drev)}
371 for node in nodelist:
371 for node in nodelist:
372 ctx = unfi[node]
372 ctx = unfi[node]
373 # For tags like "D123", put them into "toconfirm" to verify later
373 # For tags like "D123", put them into "toconfirm" to verify later
374 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
374 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
375 for n in precnodes:
375 for n in precnodes:
376 if n in nodemap:
376 if n in nodemap:
377 for tag in unfi.nodetags(n):
377 for tag in unfi.nodetags(n):
378 m = _differentialrevisiontagre.match(tag)
378 m = _differentialrevisiontagre.match(tag)
379 if m:
379 if m:
380 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
380 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
381 continue
381 continue
382
382
383 # Check commit message
383 # Check commit message
384 m = _differentialrevisiondescre.search(ctx.description())
384 m = _differentialrevisiondescre.search(ctx.description())
385 if m:
385 if m:
386 toconfirm[node] = (1, set(precnodes), int(m.group(r'id')))
386 toconfirm[node] = (1, set(precnodes), int(m.group(r'id')))
387
387
388 # Double check if tags are genuine by collecting all old nodes from
388 # Double check if tags are genuine by collecting all old nodes from
389 # Phabricator, and expect precursors overlap with it.
389 # Phabricator, and expect precursors overlap with it.
390 if toconfirm:
390 if toconfirm:
391 drevs = [drev for force, precs, drev in toconfirm.values()]
391 drevs = [drev for force, precs, drev in toconfirm.values()]
392 alldiffs = callconduit(
392 alldiffs = callconduit(
393 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
393 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
394 )
394 )
395 getnode = lambda d: bin(getdiffmeta(d).get(b'node', b'')) or None
395 getnode = lambda d: bin(getdiffmeta(d).get(b'node', b'')) or None
396 for newnode, (force, precset, drev) in toconfirm.items():
396 for newnode, (force, precset, drev) in toconfirm.items():
397 diffs = [
397 diffs = [
398 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
398 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
399 ]
399 ]
400
400
401 # "precursors" as known by Phabricator
401 # "precursors" as known by Phabricator
402 phprecset = set(getnode(d) for d in diffs)
402 phprecset = set(getnode(d) for d in diffs)
403
403
404 # Ignore if precursors (Phabricator and local repo) do not overlap,
404 # Ignore if precursors (Phabricator and local repo) do not overlap,
405 # and force is not set (when commit message says nothing)
405 # and force is not set (when commit message says nothing)
406 if not force and not bool(phprecset & precset):
406 if not force and not bool(phprecset & precset):
407 tagname = b'D%d' % drev
407 tagname = b'D%d' % drev
408 tags.tag(
408 tags.tag(
409 repo,
409 repo,
410 tagname,
410 tagname,
411 nullid,
411 nullid,
412 message=None,
412 message=None,
413 user=None,
413 user=None,
414 date=None,
414 date=None,
415 local=True,
415 local=True,
416 )
416 )
417 unfi.ui.warn(
417 unfi.ui.warn(
418 _(
418 _(
419 b'D%s: local tag removed - does not match '
419 b'D%s: local tag removed - does not match '
420 b'Differential history\n'
420 b'Differential history\n'
421 )
421 )
422 % drev
422 % drev
423 )
423 )
424 continue
424 continue
425
425
426 # Find the last node using Phabricator metadata, and make sure it
426 # Find the last node using Phabricator metadata, and make sure it
427 # exists in the repo
427 # exists in the repo
428 oldnode = lastdiff = None
428 oldnode = lastdiff = None
429 if diffs:
429 if diffs:
430 lastdiff = max(diffs, key=lambda d: int(d[b'id']))
430 lastdiff = max(diffs, key=lambda d: int(d[b'id']))
431 oldnode = getnode(lastdiff)
431 oldnode = getnode(lastdiff)
432 if oldnode and oldnode not in nodemap:
432 if oldnode and oldnode not in nodemap:
433 oldnode = None
433 oldnode = None
434
434
435 result[newnode] = (oldnode, lastdiff, drev)
435 result[newnode] = (oldnode, lastdiff, drev)
436
436
437 return result
437 return result
438
438
439
439
440 def getdiff(ctx, diffopts):
440 def getdiff(ctx, diffopts):
441 """plain-text diff without header (user, commit message, etc)"""
441 """plain-text diff without header (user, commit message, etc)"""
442 output = util.stringio()
442 output = util.stringio()
443 for chunk, _label in patch.diffui(
443 for chunk, _label in patch.diffui(
444 ctx.repo(), ctx.p1().node(), ctx.node(), None, opts=diffopts
444 ctx.repo(), ctx.p1().node(), ctx.node(), None, opts=diffopts
445 ):
445 ):
446 output.write(chunk)
446 output.write(chunk)
447 return output.getvalue()
447 return output.getvalue()
448
448
449
449
450 def creatediff(ctx):
450 def creatediff(ctx):
451 """create a Differential Diff"""
451 """create a Differential Diff"""
452 repo = ctx.repo()
452 repo = ctx.repo()
453 repophid = getrepophid(repo)
453 repophid = getrepophid(repo)
454 # Create a "Differential Diff" via "differential.createrawdiff" API
454 # Create a "Differential Diff" via "differential.createrawdiff" API
455 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
455 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
456 if repophid:
456 if repophid:
457 params[b'repositoryPHID'] = repophid
457 params[b'repositoryPHID'] = repophid
458 diff = callconduit(repo.ui, b'differential.createrawdiff', params)
458 diff = callconduit(repo.ui, b'differential.createrawdiff', params)
459 if not diff:
459 if not diff:
460 raise error.Abort(_(b'cannot create diff for %s') % ctx)
460 raise error.Abort(_(b'cannot create diff for %s') % ctx)
461 return diff
461 return diff
462
462
463
463
464 def writediffproperties(ctx, diff):
464 def writediffproperties(ctx, diff):
465 """write metadata to diff so patches could be applied losslessly"""
465 """write metadata to diff so patches could be applied losslessly"""
466 params = {
466 params = {
467 b'diff_id': diff[b'id'],
467 b'diff_id': diff[b'id'],
468 b'name': b'hg:meta',
468 b'name': b'hg:meta',
469 b'data': templatefilters.json(
469 b'data': templatefilters.json(
470 {
470 {
471 b'user': ctx.user(),
471 b'user': ctx.user(),
472 b'date': b'%d %d' % ctx.date(),
472 b'date': b'%d %d' % ctx.date(),
473 b'branch': ctx.branch(),
473 b'branch': ctx.branch(),
474 b'node': ctx.hex(),
474 b'node': ctx.hex(),
475 b'parent': ctx.p1().hex(),
475 b'parent': ctx.p1().hex(),
476 }
476 }
477 ),
477 ),
478 }
478 }
479 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
479 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
480
480
481 params = {
481 params = {
482 b'diff_id': diff[b'id'],
482 b'diff_id': diff[b'id'],
483 b'name': b'local:commits',
483 b'name': b'local:commits',
484 b'data': templatefilters.json(
484 b'data': templatefilters.json(
485 {
485 {
486 ctx.hex(): {
486 ctx.hex(): {
487 b'author': stringutil.person(ctx.user()),
487 b'author': stringutil.person(ctx.user()),
488 b'authorEmail': stringutil.email(ctx.user()),
488 b'authorEmail': stringutil.email(ctx.user()),
489 b'time': int(ctx.date()[0]),
489 b'time': int(ctx.date()[0]),
490 b'commit': ctx.hex(),
490 b'commit': ctx.hex(),
491 b'parents': [ctx.p1().hex()],
491 b'parents': [ctx.p1().hex()],
492 b'branch': ctx.branch(),
492 b'branch': ctx.branch(),
493 },
493 },
494 }
494 }
495 ),
495 ),
496 }
496 }
497 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
497 callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
498
498
499
499
500 def createdifferentialrevision(
500 def createdifferentialrevision(
501 ctx,
501 ctx,
502 revid=None,
502 revid=None,
503 parentrevphid=None,
503 parentrevphid=None,
504 oldnode=None,
504 oldnode=None,
505 olddiff=None,
505 olddiff=None,
506 actions=None,
506 actions=None,
507 comment=None,
507 comment=None,
508 ):
508 ):
509 """create or update a Differential Revision
509 """create or update a Differential Revision
510
510
511 If revid is None, create a new Differential Revision, otherwise update
511 If revid is None, create a new Differential Revision, otherwise update
512 revid. If parentrevphid is not None, set it as a dependency.
512 revid. If parentrevphid is not None, set it as a dependency.
513
513
514 If oldnode is not None, check if the patch content (without commit message
514 If oldnode is not None, check if the patch content (without commit message
515 and metadata) has changed before creating another diff.
515 and metadata) has changed before creating another diff.
516
516
517 If actions is not None, they will be appended to the transaction.
517 If actions is not None, they will be appended to the transaction.
518 """
518 """
519 repo = ctx.repo()
519 repo = ctx.repo()
520 if oldnode:
520 if oldnode:
521 diffopts = mdiff.diffopts(git=True, context=32767)
521 diffopts = mdiff.diffopts(git=True, context=32767)
522 oldctx = repo.unfiltered()[oldnode]
522 oldctx = repo.unfiltered()[oldnode]
523 neednewdiff = getdiff(ctx, diffopts) != getdiff(oldctx, diffopts)
523 neednewdiff = getdiff(ctx, diffopts) != getdiff(oldctx, diffopts)
524 else:
524 else:
525 neednewdiff = True
525 neednewdiff = True
526
526
527 transactions = []
527 transactions = []
528 if neednewdiff:
528 if neednewdiff:
529 diff = creatediff(ctx)
529 diff = creatediff(ctx)
530 transactions.append({b'type': b'update', b'value': diff[b'phid']})
530 transactions.append({b'type': b'update', b'value': diff[b'phid']})
531 if comment:
531 if comment:
532 transactions.append({b'type': b'comment', b'value': comment})
532 transactions.append({b'type': b'comment', b'value': comment})
533 else:
533 else:
534 # Even if we don't need to upload a new diff because the patch content
534 # Even if we don't need to upload a new diff because the patch content
535 # has not changed, we might still need to update its metadata so
535 # has not changed, we might still need to update its metadata so
536 # pushers know the correct node metadata.
536 # pushers know the correct node metadata.
537 assert olddiff
537 assert olddiff
538 diff = olddiff
538 diff = olddiff
539 writediffproperties(ctx, diff)
539 writediffproperties(ctx, diff)
540
540
541 # Set the parent Revision every time, so commit re-ordering is picked-up
541 # Set the parent Revision every time, so commit re-ordering is picked-up
542 if parentrevphid:
542 if parentrevphid:
543 transactions.append(
543 transactions.append(
544 {b'type': b'parents.set', b'value': [parentrevphid]}
544 {b'type': b'parents.set', b'value': [parentrevphid]}
545 )
545 )
546
546
547 if actions:
547 if actions:
548 transactions += actions
548 transactions += actions
549
549
550 # Parse commit message and update related fields.
550 # Parse commit message and update related fields.
551 desc = ctx.description()
551 desc = ctx.description()
552 info = callconduit(
552 info = callconduit(
553 repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
553 repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
554 )
554 )
555 for k, v in info[b'fields'].items():
555 for k, v in info[b'fields'].items():
556 if k in [b'title', b'summary', b'testPlan']:
556 if k in [b'title', b'summary', b'testPlan']:
557 transactions.append({b'type': k, b'value': v})
557 transactions.append({b'type': k, b'value': v})
558
558
559 params = {b'transactions': transactions}
559 params = {b'transactions': transactions}
560 if revid is not None:
560 if revid is not None:
561 # Update an existing Differential Revision
561 # Update an existing Differential Revision
562 params[b'objectIdentifier'] = revid
562 params[b'objectIdentifier'] = revid
563
563
564 revision = callconduit(repo.ui, b'differential.revision.edit', params)
564 revision = callconduit(repo.ui, b'differential.revision.edit', params)
565 if not revision:
565 if not revision:
566 raise error.Abort(_(b'cannot create revision for %s') % ctx)
566 raise error.Abort(_(b'cannot create revision for %s') % ctx)
567
567
568 return revision, diff
568 return revision, diff
569
569
570
570
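For reference, a sketch of the transaction list this function assembles
(values illustrative; the transaction types are the ones used above):

  transactions = [
      {b'type': b'update', b'value': diffphid},           # attach the new diff
      {b'type': b'parents.set', b'value': [parentphid]},  # stack ordering
      {b'type': b'title', b'value': b'commit title'},     # from the message
  ]
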
571 def userphids(repo, names):
571 def userphids(repo, names):
572 """convert user names to PHIDs"""
572 """convert user names to PHIDs"""
573 names = [name.lower() for name in names]
573 names = [name.lower() for name in names]
574 query = {b'constraints': {b'usernames': names}}
574 query = {b'constraints': {b'usernames': names}}
575 result = callconduit(repo.ui, b'user.search', query)
575 result = callconduit(repo.ui, b'user.search', query)
576 # an unknown username is not an API error, so check here whether any
576 # an unknown username is not an API error, so check here whether any
577 # names were missed.
577 # names were missed.
578 data = result[b'data']
578 data = result[b'data']
579 resolved = set(entry[b'fields'][b'username'].lower() for entry in data)
579 resolved = set(entry[b'fields'][b'username'].lower() for entry in data)
580 unresolved = set(names) - resolved
580 unresolved = set(names) - resolved
581 if unresolved:
581 if unresolved:
582 raise error.Abort(
582 raise error.Abort(
583 _(b'unknown username: %s') % b' '.join(sorted(unresolved))
583 _(b'unknown username: %s') % b' '.join(sorted(unresolved))
584 )
584 )
585 return [entry[b'phid'] for entry in data]
585 return [entry[b'phid'] for entry in data]
586
586
587
587
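A hedged usage sketch (usernames hypothetical):

  phids = userphids(repo, [b'alice', b'bob'])
  # -> [b'PHID-USER-...', b'PHID-USER-...']; aborts with the list of any
  # usernames the server does not know
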
588 @vcrcommand(
588 @vcrcommand(
589 b'phabsend',
589 b'phabsend',
590 [
590 [
591 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
591 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
592 (b'', b'amend', True, _(b'update commit messages')),
592 (b'', b'amend', True, _(b'update commit messages')),
593 (b'', b'reviewer', [], _(b'specify reviewers')),
593 (b'', b'reviewer', [], _(b'specify reviewers')),
594 (b'', b'blocker', [], _(b'specify blocking reviewers')),
594 (b'', b'blocker', [], _(b'specify blocking reviewers')),
595 (
595 (
596 b'm',
596 b'm',
597 b'comment',
597 b'comment',
598 b'',
598 b'',
599 _(b'add a comment to Revisions with new/updated Diffs'),
599 _(b'add a comment to Revisions with new/updated Diffs'),
600 ),
600 ),
601 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
601 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
602 ],
602 ],
603 _(b'REV [OPTIONS]'),
603 _(b'REV [OPTIONS]'),
604 helpcategory=command.CATEGORY_IMPORT_EXPORT,
604 helpcategory=command.CATEGORY_IMPORT_EXPORT,
605 )
605 )
606 def phabsend(ui, repo, *revs, **opts):
606 def phabsend(ui, repo, *revs, **opts):
607 """upload changesets to Phabricator
607 """upload changesets to Phabricator
608
608
609 If multiple revisions are specified, they will be sent as a stack
609 If multiple revisions are specified, they will be sent as a stack
610 with a linear dependency relationship, using the order specified by the
610 with a linear dependency relationship, using the order specified by the
611 revset.
611 revset.
612
612
613 When uploading changesets for the first time, local tags will be created to
613 When uploading changesets for the first time, local tags will be created to
614 maintain the association. After the first time, phabsend will check
614 maintain the association. After the first time, phabsend will check
615 obsstore and tags information so it can figure out whether to update an
615 obsstore and tags information so it can figure out whether to update an
616 existing Differential Revision, or create a new one.
616 existing Differential Revision, or create a new one.
617
617
618 If --amend is set, update commit messages so they have the
618 If --amend is set, update commit messages so they have the
619 ``Differential Revision`` URL, and remove related tags. This is similar to
619 ``Differential Revision`` URL, and remove related tags. This is similar to
620 what arcanist does, and is preferred in author-push workflows. Otherwise,
620 what arcanist does, and is preferred in author-push workflows. Otherwise,
621 use local tags to record the ``Differential Revision`` association.
621 use local tags to record the ``Differential Revision`` association.
622
622
623 The --confirm option lets you confirm changesets before sending them. You
623 The --confirm option lets you confirm changesets before sending them. You
624 can also add the following to your configuration file to make it the default
624 can also add the following to your configuration file to make it the default
625 behaviour::
625 behaviour::
626
626
627 [phabsend]
627 [phabsend]
628 confirm = true
628 confirm = true
629
629
630 phabsend will check obsstore and the above association to decide whether to
630 phabsend will check obsstore and the above association to decide whether to
631 update an existing Differential Revision, or create a new one.
631 update an existing Differential Revision, or create a new one.
632 """
632 """
633 opts = pycompat.byteskwargs(opts)
633 opts = pycompat.byteskwargs(opts)
634 revs = list(revs) + opts.get(b'rev', [])
634 revs = list(revs) + opts.get(b'rev', [])
635 revs = scmutil.revrange(repo, revs)
635 revs = scmutil.revrange(repo, revs)
636
636
637 if not revs:
637 if not revs:
638 raise error.Abort(_(b'phabsend requires at least one changeset'))
638 raise error.Abort(_(b'phabsend requires at least one changeset'))
639 if opts.get(b'amend'):
639 if opts.get(b'amend'):
640 cmdutil.checkunfinished(repo)
640 cmdutil.checkunfinished(repo)
641
641
642 # {newnode: (oldnode, olddiff, olddrev)}
642 # {newnode: (oldnode, olddiff, olddrev)}
643 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
643 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
644
644
645 confirm = ui.configbool(b'phabsend', b'confirm')
645 confirm = ui.configbool(b'phabsend', b'confirm')
646 confirm |= bool(opts.get(b'confirm'))
646 confirm |= bool(opts.get(b'confirm'))
647 if confirm:
647 if confirm:
648 confirmed = _confirmbeforesend(repo, revs, oldmap)
648 confirmed = _confirmbeforesend(repo, revs, oldmap)
649 if not confirmed:
649 if not confirmed:
650 raise error.Abort(_(b'phabsend cancelled'))
650 raise error.Abort(_(b'phabsend cancelled'))
651
651
652 actions = []
652 actions = []
653 reviewers = opts.get(b'reviewer', [])
653 reviewers = opts.get(b'reviewer', [])
654 blockers = opts.get(b'blocker', [])
654 blockers = opts.get(b'blocker', [])
655 phids = []
655 phids = []
656 if reviewers:
656 if reviewers:
657 phids.extend(userphids(repo, reviewers))
657 phids.extend(userphids(repo, reviewers))
658 if blockers:
658 if blockers:
659 phids.extend(
659 phids.extend(
660 map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers))
660 map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers))
661 )
661 )
662 if phids:
662 if phids:
663 actions.append({b'type': b'reviewers.add', b'value': phids})
663 actions.append({b'type': b'reviewers.add', b'value': phids})
664
664
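# For illustration: hypothetical options --reviewer alice --blocker bob
# would collapse into a single transaction of the form
#   {b'type': b'reviewers.add',
#    b'value': [b'PHID-USER-aaaa', b'blocking(PHID-USER-bbbb)']}
# where the PHID values stand in for whatever user.search returned.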
665 drevids = [] # [int]
665 drevids = [] # [int]
666 diffmap = {} # {newnode: diff}
666 diffmap = {} # {newnode: diff}
667
667
668 # Send patches one by one so we know their Differential Revision PHIDs and
668 # Send patches one by one so we know their Differential Revision PHIDs and
669 # can provide the dependency relationship
669 # can provide the dependency relationship
670 lastrevphid = None
670 lastrevphid = None
671 for rev in revs:
671 for rev in revs:
672 ui.debug(b'sending rev %d\n' % rev)
672 ui.debug(b'sending rev %d\n' % rev)
673 ctx = repo[rev]
673 ctx = repo[rev]
674
674
675 # Get Differential Revision ID
675 # Get Differential Revision ID
676 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
676 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
677 if oldnode != ctx.node() or opts.get(b'amend'):
677 if oldnode != ctx.node() or opts.get(b'amend'):
678 # Create or update Differential Revision
678 # Create or update Differential Revision
679 revision, diff = createdifferentialrevision(
679 revision, diff = createdifferentialrevision(
680 ctx,
680 ctx,
681 revid,
681 revid,
682 lastrevphid,
682 lastrevphid,
683 oldnode,
683 oldnode,
684 olddiff,
684 olddiff,
685 actions,
685 actions,
686 opts.get(b'comment'),
686 opts.get(b'comment'),
687 )
687 )
688 diffmap[ctx.node()] = diff
688 diffmap[ctx.node()] = diff
689 newrevid = int(revision[b'object'][b'id'])
689 newrevid = int(revision[b'object'][b'id'])
690 newrevphid = revision[b'object'][b'phid']
690 newrevphid = revision[b'object'][b'phid']
691 if revid:
691 if revid:
692 action = b'updated'
692 action = b'updated'
693 else:
693 else:
694 action = b'created'
694 action = b'created'
695
695
696 # Create a local tag to note the association, if the commit message
696 # Create a local tag to note the association, if the commit message
697 # does not have it already
697 # does not have it already
698 m = _differentialrevisiondescre.search(ctx.description())
698 m = _differentialrevisiondescre.search(ctx.description())
699 if not m or int(m.group(r'id')) != newrevid:
699 if not m or int(m.group(r'id')) != newrevid:
700 tagname = b'D%d' % newrevid
700 tagname = b'D%d' % newrevid
701 tags.tag(
701 tags.tag(
702 repo,
702 repo,
703 tagname,
703 tagname,
704 ctx.node(),
704 ctx.node(),
705 message=None,
705 message=None,
706 user=None,
706 user=None,
707 date=None,
707 date=None,
708 local=True,
708 local=True,
709 )
709 )
710 else:
710 else:
711 # Nothing changed. But still set "newrevphid" so the next revision
711 # Nothing changed. But still set "newrevphid" so the next revision
712 # can depend on this one, and "newrevid" for the summary line.
712 # can depend on this one, and "newrevid" for the summary line.
713 newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid']
713 newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid']
714 newrevid = revid
714 newrevid = revid
715 action = b'skipped'
715 action = b'skipped'
716
716
717 actiondesc = ui.label(
717 actiondesc = ui.label(
718 {
718 {
719 b'created': _(b'created'),
719 b'created': _(b'created'),
720 b'skipped': _(b'skipped'),
720 b'skipped': _(b'skipped'),
721 b'updated': _(b'updated'),
721 b'updated': _(b'updated'),
722 }[action],
722 }[action],
723 b'phabricator.action.%s' % action,
723 b'phabricator.action.%s' % action,
724 )
724 )
725 drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
725 drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
726 nodedesc = ui.label(bytes(ctx), b'phabricator.node')
726 nodedesc = ui.label(bytes(ctx), b'phabricator.node')
727 desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
727 desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
728 ui.write(
728 ui.write(
729 _(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc)
729 _(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc)
730 )
730 )
731 drevids.append(newrevid)
731 drevids.append(newrevid)
732 lastrevphid = newrevphid
732 lastrevphid = newrevphid
733
733
734 # Update commit messages and remove tags
734 # Update commit messages and remove tags
735 if opts.get(b'amend'):
735 if opts.get(b'amend'):
736 unfi = repo.unfiltered()
736 unfi = repo.unfiltered()
737 drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
737 drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
738 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
738 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
739 wnode = unfi[b'.'].node()
739 wnode = unfi[b'.'].node()
740 mapping = {} # {oldnode: [newnode]}
740 mapping = {} # {oldnode: [newnode]}
741 for i, rev in enumerate(revs):
741 for i, rev in enumerate(revs):
742 old = unfi[rev]
742 old = unfi[rev]
743 drevid = drevids[i]
743 drevid = drevids[i]
744 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
744 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
745 newdesc = getdescfromdrev(drev)
745 newdesc = getdescfromdrev(drev)
746 # Make sure the commit message contains "Differential Revision"
746 # Make sure the commit message contains "Differential Revision"
747 if old.description() != newdesc:
747 if old.description() != newdesc:
748 if old.phase() == phases.public:
748 if old.phase() == phases.public:
749 ui.warn(
749 ui.warn(
750 _(b"warning: not updating public commit %s\n")
750 _(b"warning: not updating public commit %s\n")
751 % scmutil.formatchangeid(old)
751 % scmutil.formatchangeid(old)
752 )
752 )
753 continue
753 continue
754 parents = [
754 parents = [
755 mapping.get(old.p1().node(), (old.p1(),))[0],
755 mapping.get(old.p1().node(), (old.p1(),))[0],
756 mapping.get(old.p2().node(), (old.p2(),))[0],
756 mapping.get(old.p2().node(), (old.p2(),))[0],
757 ]
757 ]
758 new = context.metadataonlyctx(
758 new = context.metadataonlyctx(
759 repo,
759 repo,
760 old,
760 old,
761 parents=parents,
761 parents=parents,
762 text=newdesc,
762 text=newdesc,
763 user=old.user(),
763 user=old.user(),
764 date=old.date(),
764 date=old.date(),
765 extra=old.extra(),
765 extra=old.extra(),
766 )
766 )
767
767
768 newnode = new.commit()
768 newnode = new.commit()
769
769
770 mapping[old.node()] = [newnode]
770 mapping[old.node()] = [newnode]
771 # Update diff property
771 # Update diff property
772 # If it fails, just warn and keep going; otherwise the DREV
772 # If it fails, just warn and keep going; otherwise the DREV
773 # associations will be lost
773 # associations will be lost
774 try:
774 try:
775 writediffproperties(unfi[newnode], diffmap[old.node()])
775 writediffproperties(unfi[newnode], diffmap[old.node()])
776 except util.urlerr.urlerror:
776 except util.urlerr.urlerror:
777 ui.warn(b'Failed to update metadata for D%s\n' % drevid)
777 ui.warnnoi18n(b'Failed to update metadata for D%s\n' % drevid)
778 # Remove local tags since they are no longer necessary
778 # Remove local tags since they are no longer necessary
779 tagname = b'D%d' % drevid
779 tagname = b'D%d' % drevid
780 if tagname in repo.tags():
780 if tagname in repo.tags():
781 tags.tag(
781 tags.tag(
782 repo,
782 repo,
783 tagname,
783 tagname,
784 nullid,
784 nullid,
785 message=None,
785 message=None,
786 user=None,
786 user=None,
787 date=None,
787 date=None,
788 local=True,
788 local=True,
789 )
789 )
790 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
790 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
791 if wnode in mapping:
791 if wnode in mapping:
792 unfi.setparents(mapping[wnode][0])
792 unfi.setparents(mapping[wnode][0])
793
793
794
794
795 # Map from "hg:meta" keys to header understood by "hg import". The order is
795 # Map from "hg:meta" keys to header understood by "hg import". The order is
796 # consistent with "hg export" output.
796 # consistent with "hg export" output.
797 _metanamemap = util.sortdict(
797 _metanamemap = util.sortdict(
798 [
798 [
799 (b'user', b'User'),
799 (b'user', b'User'),
800 (b'date', b'Date'),
800 (b'date', b'Date'),
801 (b'branch', b'Branch'),
801 (b'branch', b'Branch'),
802 (b'node', b'Node ID'),
802 (b'node', b'Node ID'),
803 (b'parent', b'Parent '),
803 (b'parent', b'Parent '),
804 ]
804 ]
805 )
805 )
806
806
807
807
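# A short sketch of how readpatch (below) consumes this map, assuming a
# meta dict carrying only "user" and "node":
#
#   meta = {b'user': b'Foo Bar <foo@example.com>', b'node': b'98c08ac...'}
#   header = b'# HG changeset patch\n'
#   for k in _metanamemap.keys():
#       if k in meta:
#           header += b'# %s %s\n' % (_metanamemap[k], meta[k])
#
# header then gains "# User Foo Bar <foo@example.com>" and
# "# Node ID 98c08ac..." lines, in the same order as "hg export" output.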
808 def _confirmbeforesend(repo, revs, oldmap):
808 def _confirmbeforesend(repo, revs, oldmap):
809 url, token = readurltoken(repo.ui)
809 url, token = readurltoken(repo.ui)
810 ui = repo.ui
810 ui = repo.ui
811 for rev in revs:
811 for rev in revs:
812 ctx = repo[rev]
812 ctx = repo[rev]
813 desc = ctx.description().splitlines()[0]
813 desc = ctx.description().splitlines()[0]
814 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
814 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
815 if drevid:
815 if drevid:
816 drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
816 drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
817 else:
817 else:
818 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
818 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
819
819
820 ui.write(
820 ui.write(
821 _(b'%s - %s: %s\n')
821 _(b'%s - %s: %s\n')
822 % (
822 % (
823 drevdesc,
823 drevdesc,
824 ui.label(bytes(ctx), b'phabricator.node'),
824 ui.label(bytes(ctx), b'phabricator.node'),
825 ui.label(desc, b'phabricator.desc'),
825 ui.label(desc, b'phabricator.desc'),
826 )
826 )
827 )
827 )
828
828
829 if ui.promptchoice(
829 if ui.promptchoice(
830 _(b'Send the above changes to %s (yn)?' b'$$ &Yes $$ &No') % url
830 _(b'Send the above changes to %s (yn)?' b'$$ &Yes $$ &No') % url
831 ):
831 ):
832 return False
832 return False
833
833
834 return True
834 return True
835
835
836
836
837 _knownstatusnames = {
837 _knownstatusnames = {
838 b'accepted',
838 b'accepted',
839 b'needsreview',
839 b'needsreview',
840 b'needsrevision',
840 b'needsrevision',
841 b'closed',
841 b'closed',
842 b'abandoned',
842 b'abandoned',
843 }
843 }
844
844
845
845
846 def _getstatusname(drev):
846 def _getstatusname(drev):
847 """get normalized status name from a Differential Revision"""
847 """get normalized status name from a Differential Revision"""
848 return drev[b'statusName'].replace(b' ', b'').lower()
848 return drev[b'statusName'].replace(b' ', b'').lower()
849
849
850
850
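# For example, a revision whose b'statusName' is b'Needs Review' (as in the
# sample drev shown under querydrev below) normalizes to b'needsreview',
# one of the _knownstatusnames above.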
851 # Small language to specify differential revisions. Support symbols: (), :X,
851 # Small language to specify differential revisions. Support symbols: (), :X,
852 # +, and -.
852 # +, and -.
853
853
854 _elements = {
854 _elements = {
855 # token-type: binding-strength, primary, prefix, infix, suffix
855 # token-type: binding-strength, primary, prefix, infix, suffix
856 b'(': (12, None, (b'group', 1, b')'), None, None),
856 b'(': (12, None, (b'group', 1, b')'), None, None),
857 b':': (8, None, (b'ancestors', 8), None, None),
857 b':': (8, None, (b'ancestors', 8), None, None),
858 b'&': (5, None, None, (b'and_', 5), None),
858 b'&': (5, None, None, (b'and_', 5), None),
859 b'+': (4, None, None, (b'add', 4), None),
859 b'+': (4, None, None, (b'add', 4), None),
860 b'-': (4, None, None, (b'sub', 4), None),
860 b'-': (4, None, None, (b'sub', 4), None),
861 b')': (0, None, None, None, None),
861 b')': (0, None, None, None, None),
862 b'symbol': (0, b'symbol', None, None, None),
862 b'symbol': (0, b'symbol', None, None, None),
863 b'end': (0, None, None, None, None),
863 b'end': (0, None, None, None, None),
864 }
864 }
865
865
866
866
867 def _tokenize(text):
867 def _tokenize(text):
868 view = memoryview(text) # zero-copy slice
868 view = memoryview(text) # zero-copy slice
869 special = b'():+-& '
869 special = b'():+-& '
870 pos = 0
870 pos = 0
871 length = len(text)
871 length = len(text)
872 while pos < length:
872 while pos < length:
873 symbol = b''.join(
873 symbol = b''.join(
874 itertools.takewhile(
874 itertools.takewhile(
875 lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
875 lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
876 )
876 )
877 )
877 )
878 if symbol:
878 if symbol:
879 yield (b'symbol', symbol, pos)
879 yield (b'symbol', symbol, pos)
880 pos += len(symbol)
880 pos += len(symbol)
881 else: # special char, ignore space
881 else: # special char, ignore space
882 if text[pos : pos + 1] != b' ':
882 if text[pos : pos + 1] != b' ':
883 yield (text[pos : pos + 1], None, pos)
883 yield (text[pos : pos + 1], None, pos)
884 pos += 1
884 pos += 1
885 yield (b'end', None, pos)
885 yield (b'end', None, pos)
886
886
887
887
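# For example, the token stream for the hypothetical spec b':D6+8' is
#   (b':', None, 0), (b'symbol', b'D6', 1), (b'+', None, 3),
#   (b'symbol', b'8', 4), (b'end', None, 5)
# which _parse below folds into an ancestors/add parse tree.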
888 def _parse(text):
888 def _parse(text):
889 tree, pos = parser.parser(_elements).parse(_tokenize(text))
889 tree, pos = parser.parser(_elements).parse(_tokenize(text))
890 if pos != len(text):
890 if pos != len(text):
891 raise error.ParseError(b'invalid token', pos)
891 raise error.ParseError(b'invalid token', pos)
892 return tree
892 return tree
893
893
894
894
895 def _parsedrev(symbol):
895 def _parsedrev(symbol):
896 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
896 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
897 if symbol.startswith(b'D') and symbol[1:].isdigit():
897 if symbol.startswith(b'D') and symbol[1:].isdigit():
898 return int(symbol[1:])
898 return int(symbol[1:])
899 if symbol.isdigit():
899 if symbol.isdigit():
900 return int(symbol)
900 return int(symbol)
901
901
902
902
903 def _prefetchdrevs(tree):
903 def _prefetchdrevs(tree):
904 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
904 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
905 drevs = set()
905 drevs = set()
906 ancestordrevs = set()
906 ancestordrevs = set()
907 op = tree[0]
907 op = tree[0]
908 if op == b'symbol':
908 if op == b'symbol':
909 r = _parsedrev(tree[1])
909 r = _parsedrev(tree[1])
910 if r:
910 if r:
911 drevs.add(r)
911 drevs.add(r)
912 elif op == b'ancestors':
912 elif op == b'ancestors':
913 r, a = _prefetchdrevs(tree[1])
913 r, a = _prefetchdrevs(tree[1])
914 drevs.update(r)
914 drevs.update(r)
915 ancestordrevs.update(r)
915 ancestordrevs.update(r)
916 ancestordrevs.update(a)
916 ancestordrevs.update(a)
917 else:
917 else:
918 for t in tree[1:]:
918 for t in tree[1:]:
919 r, a = _prefetchdrevs(t)
919 r, a = _prefetchdrevs(t)
920 drevs.update(r)
920 drevs.update(r)
921 ancestordrevs.update(a)
921 ancestordrevs.update(a)
922 return drevs, ancestordrevs
922 return drevs, ancestordrevs
923
923
924
924
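# For example, the spec b':D2+D5' yields
#   drevs = {2, 5}, ancestordrevs = {2}
# letting querydrev below prefetch D2 and its ancestors in batches, plus D5.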
925 def querydrev(repo, spec):
925 def querydrev(repo, spec):
926 """return a list of "Differential Revision" dicts
926 """return a list of "Differential Revision" dicts
927
927
928 spec is a string using a simple query language; see the docstring in
928 spec is a string using a simple query language; see the docstring in
929 phabread for details.
929 phabread for details.
930
930
931 A "Differential Revision dict" looks like:
931 A "Differential Revision dict" looks like:
932
932
933 {
933 {
934 "id": "2",
934 "id": "2",
935 "phid": "PHID-DREV-672qvysjcczopag46qty",
935 "phid": "PHID-DREV-672qvysjcczopag46qty",
936 "title": "example",
936 "title": "example",
937 "uri": "https://phab.example.com/D2",
937 "uri": "https://phab.example.com/D2",
938 "dateCreated": "1499181406",
938 "dateCreated": "1499181406",
939 "dateModified": "1499182103",
939 "dateModified": "1499182103",
940 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
940 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
941 "status": "0",
941 "status": "0",
942 "statusName": "Needs Review",
942 "statusName": "Needs Review",
943 "properties": [],
943 "properties": [],
944 "branch": null,
944 "branch": null,
945 "summary": "",
945 "summary": "",
946 "testPlan": "",
946 "testPlan": "",
947 "lineCount": "2",
947 "lineCount": "2",
948 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
948 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
949 "diffs": [
949 "diffs": [
950 "3",
950 "3",
951 "4",
951 "4",
952 ],
952 ],
953 "commits": [],
953 "commits": [],
954 "reviewers": [],
954 "reviewers": [],
955 "ccs": [],
955 "ccs": [],
956 "hashes": [],
956 "hashes": [],
957 "auxiliary": {
957 "auxiliary": {
958 "phabricator:projects": [],
958 "phabricator:projects": [],
959 "phabricator:depends-on": [
959 "phabricator:depends-on": [
960 "PHID-DREV-gbapp366kutjebt7agcd"
960 "PHID-DREV-gbapp366kutjebt7agcd"
961 ]
961 ]
962 },
962 },
963 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
963 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
964 "sourcePath": null
964 "sourcePath": null
965 }
965 }
966 """
966 """
967
967
968 def fetch(params):
968 def fetch(params):
969 """params -> single drev or None"""
969 """params -> single drev or None"""
970 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
970 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
971 if key in prefetched:
971 if key in prefetched:
972 return prefetched[key]
972 return prefetched[key]
973 drevs = callconduit(repo.ui, b'differential.query', params)
973 drevs = callconduit(repo.ui, b'differential.query', params)
974 # Fill prefetched with the result
974 # Fill prefetched with the result
975 for drev in drevs:
975 for drev in drevs:
976 prefetched[drev[b'phid']] = drev
976 prefetched[drev[b'phid']] = drev
977 prefetched[int(drev[b'id'])] = drev
977 prefetched[int(drev[b'id'])] = drev
978 if key not in prefetched:
978 if key not in prefetched:
979 raise error.Abort(
979 raise error.Abort(
980 _(b'cannot get Differential Revision %r') % params
980 _(b'cannot get Differential Revision %r') % params
981 )
981 )
982 return prefetched[key]
982 return prefetched[key]
983
983
984 def getstack(topdrevids):
984 def getstack(topdrevids):
985 """given a top, get a stack from the bottom, [id] -> [id]"""
985 """given a top, get a stack from the bottom, [id] -> [id]"""
986 visited = set()
986 visited = set()
987 result = []
987 result = []
988 queue = [{b'ids': [i]} for i in topdrevids]
988 queue = [{b'ids': [i]} for i in topdrevids]
989 while queue:
989 while queue:
990 params = queue.pop()
990 params = queue.pop()
991 drev = fetch(params)
991 drev = fetch(params)
992 if drev[b'id'] in visited:
992 if drev[b'id'] in visited:
993 continue
993 continue
994 visited.add(drev[b'id'])
994 visited.add(drev[b'id'])
995 result.append(int(drev[b'id']))
995 result.append(int(drev[b'id']))
996 auxiliary = drev.get(b'auxiliary', {})
996 auxiliary = drev.get(b'auxiliary', {})
997 depends = auxiliary.get(b'phabricator:depends-on', [])
997 depends = auxiliary.get(b'phabricator:depends-on', [])
998 for phid in depends:
998 for phid in depends:
999 queue.append({b'phids': [phid]})
999 queue.append({b'phids': [phid]})
1000 result.reverse()
1000 result.reverse()
1001 return smartset.baseset(result)
1001 return smartset.baseset(result)
1002
1002
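# For illustration: if D3 depends on D2, which depends on D1 (a
# hypothetical stack), getstack([3]) follows the phabricator:depends-on
# edges and returns [1, 2, 3], bottom of the stack first.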
1003 # Initialize prefetch cache
1003 # Initialize prefetch cache
1004 prefetched = {} # {id or phid: drev}
1004 prefetched = {} # {id or phid: drev}
1005
1005
1006 tree = _parse(spec)
1006 tree = _parse(spec)
1007 drevs, ancestordrevs = _prefetchdrevs(tree)
1007 drevs, ancestordrevs = _prefetchdrevs(tree)
1008
1008
1009 # developer config: phabricator.batchsize
1009 # developer config: phabricator.batchsize
1010 batchsize = repo.ui.configint(b'phabricator', b'batchsize')
1010 batchsize = repo.ui.configint(b'phabricator', b'batchsize')
1011
1011
1012 # Prefetch Differential Revisions in batch
1012 # Prefetch Differential Revisions in batch
1013 tofetch = set(drevs)
1013 tofetch = set(drevs)
1014 for r in ancestordrevs:
1014 for r in ancestordrevs:
1015 tofetch.update(range(max(1, r - batchsize), r + 1))
1015 tofetch.update(range(max(1, r - batchsize), r + 1))
1016 if drevs:
1016 if drevs:
1017 fetch({b'ids': list(tofetch)})
1017 fetch({b'ids': list(tofetch)})
1018 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
1018 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
1019
1019
1020 # Walk through the tree, return smartsets
1020 # Walk through the tree, return smartsets
1021 def walk(tree):
1021 def walk(tree):
1022 op = tree[0]
1022 op = tree[0]
1023 if op == b'symbol':
1023 if op == b'symbol':
1024 drev = _parsedrev(tree[1])
1024 drev = _parsedrev(tree[1])
1025 if drev:
1025 if drev:
1026 return smartset.baseset([drev])
1026 return smartset.baseset([drev])
1027 elif tree[1] in _knownstatusnames:
1027 elif tree[1] in _knownstatusnames:
1028 drevs = [
1028 drevs = [
1029 r
1029 r
1030 for r in validids
1030 for r in validids
1031 if _getstatusname(prefetched[r]) == tree[1]
1031 if _getstatusname(prefetched[r]) == tree[1]
1032 ]
1032 ]
1033 return smartset.baseset(drevs)
1033 return smartset.baseset(drevs)
1034 else:
1034 else:
1035 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
1035 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
1036 elif op in {b'and_', b'add', b'sub'}:
1036 elif op in {b'and_', b'add', b'sub'}:
1037 assert len(tree) == 3
1037 assert len(tree) == 3
1038 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
1038 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
1039 elif op == b'group':
1039 elif op == b'group':
1040 return walk(tree[1])
1040 return walk(tree[1])
1041 elif op == b'ancestors':
1041 elif op == b'ancestors':
1042 return getstack(walk(tree[1]))
1042 return getstack(walk(tree[1]))
1043 else:
1043 else:
1044 raise error.ProgrammingError(b'illegal tree: %r' % tree)
1044 raise error.ProgrammingError(b'illegal tree: %r' % tree)
1045
1045
1046 return [prefetched[r] for r in walk(tree)]
1046 return [prefetched[r] for r in walk(tree)]
1047
1047
1048
1048
1049 def getdescfromdrev(drev):
1049 def getdescfromdrev(drev):
1050 """get description (commit message) from "Differential Revision"
1050 """get description (commit message) from "Differential Revision"
1051
1051
1052 This is similar to the differential.getcommitmessage API, but we only
1052 This is similar to the differential.getcommitmessage API, but we only
1053 care about a limited set of fields: title, summary, test plan, and URL.
1053 care about a limited set of fields: title, summary, test plan, and URL.
1054 """
1054 """
1055 title = drev[b'title']
1055 title = drev[b'title']
1056 summary = drev[b'summary'].rstrip()
1056 summary = drev[b'summary'].rstrip()
1057 testplan = drev[b'testPlan'].rstrip()
1057 testplan = drev[b'testPlan'].rstrip()
1058 if testplan:
1058 if testplan:
1059 testplan = b'Test Plan:\n%s' % testplan
1059 testplan = b'Test Plan:\n%s' % testplan
1060 uri = b'Differential Revision: %s' % drev[b'uri']
1060 uri = b'Differential Revision: %s' % drev[b'uri']
1061 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
1061 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
1062
1062
1063
1063
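# A sketch using the sample drev shown in querydrev above (empty summary
# and test plan, so only the non-empty pieces survive the filter):
#
#   getdescfromdrev(drev)
#   # -> b'example\n\nDifferential Revision: https://phab.example.com/D2'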
1064 def getdiffmeta(diff):
1064 def getdiffmeta(diff):
1065 """get commit metadata (date, node, user, p1) from a diff object
1065 """get commit metadata (date, node, user, p1) from a diff object
1066
1066
1067 The metadata could be "hg:meta", sent by phabsend, like:
1067 The metadata could be "hg:meta", sent by phabsend, like:
1068
1068
1069 "properties": {
1069 "properties": {
1070 "hg:meta": {
1070 "hg:meta": {
1071 "date": "1499571514 25200",
1071 "date": "1499571514 25200",
1072 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
1072 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
1073 "user": "Foo Bar <foo@example.com>",
1073 "user": "Foo Bar <foo@example.com>",
1074 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
1074 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
1075 }
1075 }
1076 }
1076 }
1077
1077
1078 Or converted from "local:commits", sent by "arc", like:
1078 Or converted from "local:commits", sent by "arc", like:
1079
1079
1080 "properties": {
1080 "properties": {
1081 "local:commits": {
1081 "local:commits": {
1082 "98c08acae292b2faf60a279b4189beb6cff1414d": {
1082 "98c08acae292b2faf60a279b4189beb6cff1414d": {
1083 "author": "Foo Bar",
1083 "author": "Foo Bar",
1084 "time": 1499546314,
1084 "time": 1499546314,
1085 "branch": "default",
1085 "branch": "default",
1086 "tag": "",
1086 "tag": "",
1087 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
1087 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
1088 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
1088 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
1089 "local": "1000",
1089 "local": "1000",
1090 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
1090 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
1091 "summary": "...",
1091 "summary": "...",
1092 "message": "...",
1092 "message": "...",
1093 "authorEmail": "foo@example.com"
1093 "authorEmail": "foo@example.com"
1094 }
1094 }
1095 }
1095 }
1096 }
1096 }
1097
1097
1098 Note: metadata extracted from "local:commits" will lose time zone
1098 Note: metadata extracted from "local:commits" will lose time zone
1099 information.
1099 information.
1100 """
1100 """
1101 props = diff.get(b'properties') or {}
1101 props = diff.get(b'properties') or {}
1102 meta = props.get(b'hg:meta')
1102 meta = props.get(b'hg:meta')
1103 if not meta:
1103 if not meta:
1104 if props.get(b'local:commits'):
1104 if props.get(b'local:commits'):
1105 commit = sorted(props[b'local:commits'].values())[0]
1105 commit = sorted(props[b'local:commits'].values())[0]
1106 meta = {}
1106 meta = {}
1107 if b'author' in commit and b'authorEmail' in commit:
1107 if b'author' in commit and b'authorEmail' in commit:
1108 meta[b'user'] = b'%s <%s>' % (
1108 meta[b'user'] = b'%s <%s>' % (
1109 commit[b'author'],
1109 commit[b'author'],
1110 commit[b'authorEmail'],
1110 commit[b'authorEmail'],
1111 )
1111 )
1112 if b'time' in commit:
1112 if b'time' in commit:
1113 meta[b'date'] = b'%d 0' % int(commit[b'time'])
1113 meta[b'date'] = b'%d 0' % int(commit[b'time'])
1114 if b'branch' in commit:
1114 if b'branch' in commit:
1115 meta[b'branch'] = commit[b'branch']
1115 meta[b'branch'] = commit[b'branch']
1116 node = commit.get(b'commit', commit.get(b'rev'))
1116 node = commit.get(b'commit', commit.get(b'rev'))
1117 if node:
1117 if node:
1118 meta[b'node'] = node
1118 meta[b'node'] = node
1119 if len(commit.get(b'parents', ())) >= 1:
1119 if len(commit.get(b'parents', ())) >= 1:
1120 meta[b'parent'] = commit[b'parents'][0]
1120 meta[b'parent'] = commit[b'parents'][0]
1121 else:
1121 else:
1122 meta = {}
1122 meta = {}
1123 if b'date' not in meta and b'dateCreated' in diff:
1123 if b'date' not in meta and b'dateCreated' in diff:
1124 meta[b'date'] = b'%s 0' % diff[b'dateCreated']
1124 meta[b'date'] = b'%s 0' % diff[b'dateCreated']
1125 if b'branch' not in meta and diff.get(b'branch'):
1125 if b'branch' not in meta and diff.get(b'branch'):
1126 meta[b'branch'] = diff[b'branch']
1126 meta[b'branch'] = diff[b'branch']
1127 if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
1127 if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
1128 meta[b'parent'] = diff[b'sourceControlBaseRevision']
1128 meta[b'parent'] = diff[b'sourceControlBaseRevision']
1129 return meta
1129 return meta
1130
1130
1131
1131
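# A sketch using the "local:commits" sample from the docstring above; the
# resulting metadata (time zone dropped, as noted) would be:
#
#   {b'user': b'Foo Bar <foo@example.com>',
#    b'date': b'1499546314 0',
#    b'branch': b'default',
#    b'node': b'98c08acae292b2faf60a279b4189beb6cff1414d',
#    b'parent': b'6d0abad76b30e4724a37ab8721d630394070fe16'}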
1132 def readpatch(repo, drevs, write):
1132 def readpatch(repo, drevs, write):
1133 """generate plain-text patch readable by 'hg import'
1133 """generate plain-text patch readable by 'hg import'
1134
1134
1135 write is usually ui.write. drevs is what "querydrev" returns, results of
1135 write is usually ui.write. drevs is what "querydrev" returns, results of
1136 "differential.query".
1136 "differential.query".
1137 """
1137 """
1138 # Prefetch hg:meta property for all diffs
1138 # Prefetch hg:meta property for all diffs
1139 diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
1139 diffids = sorted(set(max(int(v) for v in drev[b'diffs']) for drev in drevs))
1140 diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids})
1140 diffs = callconduit(repo.ui, b'differential.querydiffs', {b'ids': diffids})
1141
1141
1142 # Generate patch for each drev
1142 # Generate patch for each drev
1143 for drev in drevs:
1143 for drev in drevs:
1144 repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
1144 repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
1145
1145
1146 diffid = max(int(v) for v in drev[b'diffs'])
1146 diffid = max(int(v) for v in drev[b'diffs'])
1147 body = callconduit(
1147 body = callconduit(
1148 repo.ui, b'differential.getrawdiff', {b'diffID': diffid}
1148 repo.ui, b'differential.getrawdiff', {b'diffID': diffid}
1149 )
1149 )
1150 desc = getdescfromdrev(drev)
1150 desc = getdescfromdrev(drev)
1151 header = b'# HG changeset patch\n'
1151 header = b'# HG changeset patch\n'
1152
1152
1153 # Try to preserve metadata from hg:meta property. Write hg patch
1153 # Try to preserve metadata from hg:meta property. Write hg patch
1154 # headers that can be read by the "import" command. See patchheadermap
1154 # headers that can be read by the "import" command. See patchheadermap
1155 # and extract in mercurial/patch.py for supported headers.
1155 # and extract in mercurial/patch.py for supported headers.
1156 meta = getdiffmeta(diffs[b'%d' % diffid])
1156 meta = getdiffmeta(diffs[b'%d' % diffid])
1157 for k in _metanamemap.keys():
1157 for k in _metanamemap.keys():
1158 if k in meta:
1158 if k in meta:
1159 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
1159 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
1160
1160
1161 content = b'%s%s\n%s' % (header, desc, body)
1161 content = b'%s%s\n%s' % (header, desc, body)
1162 write(content)
1162 write(content)
1163
1163
1164
1164
1165 @vcrcommand(
1165 @vcrcommand(
1166 b'phabread',
1166 b'phabread',
1167 [(b'', b'stack', False, _(b'read dependencies'))],
1167 [(b'', b'stack', False, _(b'read dependencies'))],
1168 _(b'DREVSPEC [OPTIONS]'),
1168 _(b'DREVSPEC [OPTIONS]'),
1169 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1169 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1170 )
1170 )
1171 def phabread(ui, repo, spec, **opts):
1171 def phabread(ui, repo, spec, **opts):
1172 """print patches from Phabricator suitable for importing
1172 """print patches from Phabricator suitable for importing
1173
1173
1174 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
1174 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
1175 the number ``123``. It could also have common operators like ``+``, ``-``,
1175 the number ``123``. It could also have common operators like ``+``, ``-``,
1176 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
1176 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
1177 select a stack.
1177 select a stack.
1178
1178
1179 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
1179 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
1180 could be used to filter patches by status. For performance reasons, they
1180 could be used to filter patches by status. For performance reasons, they
1181 only represent a subset of non-status selections and cannot be used alone.
1181 only represent a subset of non-status selections and cannot be used alone.
1182
1182
1183 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, excluding
1183 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, excluding
1184 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
1184 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
1185 stack up to D9.
1185 stack up to D9.
1186
1186
1187 If --stack is given, follow dependency information and read all patches.
1187 If --stack is given, follow dependency information and read all patches.
1188 It is equivalent to the ``:`` operator.
1188 It is equivalent to the ``:`` operator.
1189 """
1189 """
1190 opts = pycompat.byteskwargs(opts)
1190 opts = pycompat.byteskwargs(opts)
1191 if opts.get(b'stack'):
1191 if opts.get(b'stack'):
1192 spec = b':(%s)' % spec
1192 spec = b':(%s)' % spec
1193 drevs = querydrev(repo, spec)
1193 drevs = querydrev(repo, spec)
1194 readpatch(repo, drevs, ui.write)
1194 readpatch(repo, drevs, ui.write)
1195
1195
1196
1196
1197 @vcrcommand(
1197 @vcrcommand(
1198 b'phabupdate',
1198 b'phabupdate',
1199 [
1199 [
1200 (b'', b'accept', False, _(b'accept revisions')),
1200 (b'', b'accept', False, _(b'accept revisions')),
1201 (b'', b'reject', False, _(b'reject revisions')),
1201 (b'', b'reject', False, _(b'reject revisions')),
1202 (b'', b'abandon', False, _(b'abandon revisions')),
1202 (b'', b'abandon', False, _(b'abandon revisions')),
1203 (b'', b'reclaim', False, _(b'reclaim revisions')),
1203 (b'', b'reclaim', False, _(b'reclaim revisions')),
1204 (b'm', b'comment', b'', _(b'comment on the last revision')),
1204 (b'm', b'comment', b'', _(b'comment on the last revision')),
1205 ],
1205 ],
1206 _(b'DREVSPEC [OPTIONS]'),
1206 _(b'DREVSPEC [OPTIONS]'),
1207 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1207 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1208 )
1208 )
1209 def phabupdate(ui, repo, spec, **opts):
1209 def phabupdate(ui, repo, spec, **opts):
1210 """update Differential Revision in batch
1210 """update Differential Revision in batch
1211
1211
1212 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
1212 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
1213 """
1213 """
1214 opts = pycompat.byteskwargs(opts)
1214 opts = pycompat.byteskwargs(opts)
1215 flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
1215 flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
1216 if len(flags) > 1:
1216 if len(flags) > 1:
1217 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
1217 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
1218
1218
1219 actions = []
1219 actions = []
1220 for f in flags:
1220 for f in flags:
1221 actions.append({b'type': f, b'value': b'true'})
1221 actions.append({b'type': f, b'value': b'true'})
1222
1222
1223 drevs = querydrev(repo, spec)
1223 drevs = querydrev(repo, spec)
1224 for i, drev in enumerate(drevs):
1224 for i, drev in enumerate(drevs):
1225 if i + 1 == len(drevs) and opts.get(b'comment'):
1225 if i + 1 == len(drevs) and opts.get(b'comment'):
1226 actions.append({b'type': b'comment', b'value': opts[b'comment']})
1226 actions.append({b'type': b'comment', b'value': opts[b'comment']})
1227 if actions:
1227 if actions:
1228 params = {
1228 params = {
1229 b'objectIdentifier': drev[b'phid'],
1229 b'objectIdentifier': drev[b'phid'],
1230 b'transactions': actions,
1230 b'transactions': actions,
1231 }
1231 }
1232 callconduit(ui, b'differential.revision.edit', params)
1232 callconduit(ui, b'differential.revision.edit', params)
1233
1233
1234
1234
1235 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
1235 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
1236 def template_review(context, mapping):
1236 def template_review(context, mapping):
1237 """:phabreview: Object describing the review for this changeset.
1237 """:phabreview: Object describing the review for this changeset.
1238 Has attributes `url` and `id`.
1238 Has attributes `url` and `id`.
1239 """
1239 """
1240 ctx = context.resource(mapping, b'ctx')
1240 ctx = context.resource(mapping, b'ctx')
1241 m = _differentialrevisiondescre.search(ctx.description())
1241 m = _differentialrevisiondescre.search(ctx.description())
1242 if m:
1242 if m:
1243 return templateutil.hybriddict(
1243 return templateutil.hybriddict(
1244 {b'url': m.group(r'url'), b'id': b"D%s" % m.group(r'id'),}
1244 {b'url': m.group(r'url'), b'id': b"D%s" % m.group(r'id'),}
1245 )
1245 )
1246 else:
1246 else:
1247 tags = ctx.repo().nodetags(ctx.node())
1247 tags = ctx.repo().nodetags(ctx.node())
1248 for t in tags:
1248 for t in tags:
1249 if _differentialrevisiontagre.match(t):
1249 if _differentialrevisiontagre.match(t):
1250 url = ctx.repo().ui.config(b'phabricator', b'url')
1250 url = ctx.repo().ui.config(b'phabricator', b'url')
1251 if not url.endswith(b'/'):
1251 if not url.endswith(b'/'):
1252 url += b'/'
1252 url += b'/'
1253 url += t
1253 url += t
1254
1254
1255 return templateutil.hybriddict({b'url': url, b'id': t,})
1255 return templateutil.hybriddict({b'url': url, b'id': t,})
1256 return None
1256 return None
@@ -1,476 +1,476 b''
1 # debugcommands.py - debug logic for remotefilelog
1 # debugcommands.py - debug logic for remotefilelog
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import hashlib
9 import hashlib
10 import os
10 import os
11 import zlib
11 import zlib
12
12
13 from mercurial.node import bin, hex, nullid, short
13 from mercurial.node import bin, hex, nullid, short
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import (
15 from mercurial import (
16 error,
16 error,
17 filelog,
17 filelog,
18 lock as lockmod,
18 lock as lockmod,
19 node as nodemod,
19 node as nodemod,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 )
22 )
23 from . import (
23 from . import (
24 constants,
24 constants,
25 datapack,
25 datapack,
26 fileserverclient,
26 fileserverclient,
27 historypack,
27 historypack,
28 repack,
28 repack,
29 shallowutil,
29 shallowutil,
30 )
30 )
31
31
32
32
33 def debugremotefilelog(ui, path, **opts):
33 def debugremotefilelog(ui, path, **opts):
34 decompress = opts.get(r'decompress')
34 decompress = opts.get(r'decompress')
35
35
36 size, firstnode, mapping = parsefileblob(path, decompress)
36 size, firstnode, mapping = parsefileblob(path, decompress)
37
37
38 ui.status(_(b"size: %d bytes\n") % size)
38 ui.status(_(b"size: %d bytes\n") % size)
39 ui.status(_(b"path: %s \n") % path)
39 ui.status(_(b"path: %s \n") % path)
40 ui.status(_(b"key: %s \n") % (short(firstnode)))
40 ui.status(_(b"key: %s \n") % (short(firstnode)))
41 ui.status(_(b"\n"))
41 ui.status(_(b"\n"))
42 ui.status(
42 ui.status(
43 _(b"%12s => %12s %13s %13s %12s\n")
43 _(b"%12s => %12s %13s %13s %12s\n")
44 % (b"node", b"p1", b"p2", b"linknode", b"copyfrom")
44 % (b"node", b"p1", b"p2", b"linknode", b"copyfrom")
45 )
45 )
46
46
47 queue = [firstnode]
47 queue = [firstnode]
48 while queue:
48 while queue:
49 node = queue.pop(0)
49 node = queue.pop(0)
50 p1, p2, linknode, copyfrom = mapping[node]
50 p1, p2, linknode, copyfrom = mapping[node]
51 ui.status(
51 ui.status(
52 _(b"%s => %s %s %s %s\n")
52 _(b"%s => %s %s %s %s\n")
53 % (short(node), short(p1), short(p2), short(linknode), copyfrom)
53 % (short(node), short(p1), short(p2), short(linknode), copyfrom)
54 )
54 )
55 if p1 != nullid:
55 if p1 != nullid:
56 queue.append(p1)
56 queue.append(p1)
57 if p2 != nullid:
57 if p2 != nullid:
58 queue.append(p2)
58 queue.append(p2)
59
59
60
60
61 def buildtemprevlog(repo, file):
61 def buildtemprevlog(repo, file):
62 # get filename key
62 # get filename key
63 filekey = nodemod.hex(hashlib.sha1(file).digest())
63 filekey = nodemod.hex(hashlib.sha1(file).digest())
64 filedir = os.path.join(repo.path, b'store/data', filekey)
64 filedir = os.path.join(repo.path, b'store/data', filekey)
65
65
66 # sort all entries based on linkrev
66 # sort all entries based on linkrev
67 fctxs = []
67 fctxs = []
68 for filenode in os.listdir(filedir):
68 for filenode in os.listdir(filedir):
69 if b'_old' not in filenode:
69 if b'_old' not in filenode:
70 fctxs.append(repo.filectx(file, fileid=bin(filenode)))
70 fctxs.append(repo.filectx(file, fileid=bin(filenode)))
71
71
72 fctxs = sorted(fctxs, key=lambda x: x.linkrev())
72 fctxs = sorted(fctxs, key=lambda x: x.linkrev())
73
73
74 # add to revlog
74 # add to revlog
75 temppath = repo.sjoin(b'data/temprevlog.i')
75 temppath = repo.sjoin(b'data/temprevlog.i')
76 if os.path.exists(temppath):
76 if os.path.exists(temppath):
77 os.remove(temppath)
77 os.remove(temppath)
78 r = filelog.filelog(repo.svfs, b'temprevlog')
78 r = filelog.filelog(repo.svfs, b'temprevlog')
79
79
80 class faket(object):
80 class faket(object):
81 def add(self, a, b, c):
81 def add(self, a, b, c):
82 pass
82 pass
83
83
84 t = faket()
84 t = faket()
85 for fctx in fctxs:
85 for fctx in fctxs:
86 if fctx.node() not in repo:
86 if fctx.node() not in repo:
87 continue
87 continue
88
88
89 p = fctx.filelog().parents(fctx.filenode())
89 p = fctx.filelog().parents(fctx.filenode())
90 meta = {}
90 meta = {}
91 if fctx.renamed():
91 if fctx.renamed():
92 meta[b'copy'] = fctx.renamed()[0]
92 meta[b'copy'] = fctx.renamed()[0]
93 meta[b'copyrev'] = hex(fctx.renamed()[1])
93 meta[b'copyrev'] = hex(fctx.renamed()[1])
94
94
95 r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])
95 r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])
96
96
97 return r
97 return r
98
98
99
99
100 def debugindex(orig, ui, repo, file_=None, **opts):
100 def debugindex(orig, ui, repo, file_=None, **opts):
101 """dump the contents of an index file"""
101 """dump the contents of an index file"""
102 if (
102 if (
103 opts.get(r'changelog')
103 opts.get(r'changelog')
104 or opts.get(r'manifest')
104 or opts.get(r'manifest')
105 or opts.get(r'dir')
105 or opts.get(r'dir')
106 or not shallowutil.isenabled(repo)
106 or not shallowutil.isenabled(repo)
107 or not repo.shallowmatch(file_)
107 or not repo.shallowmatch(file_)
108 ):
108 ):
109 return orig(ui, repo, file_, **opts)
109 return orig(ui, repo, file_, **opts)
110
110
111 r = buildtemprevlog(repo, file_)
111 r = buildtemprevlog(repo, file_)
112
112
113 # debugindex like normal
113 # debugindex like normal
114 format = opts.get(b'format', 0)
114 format = opts.get(b'format', 0)
115 if format not in (0, 1):
115 if format not in (0, 1):
116 raise error.Abort(_(b"unknown format %d") % format)
116 raise error.Abort(_(b"unknown format %d") % format)
117
117
118 generaldelta = r.version & revlog.FLAG_GENERALDELTA
118 generaldelta = r.version & revlog.FLAG_GENERALDELTA
119 if generaldelta:
119 if generaldelta:
120 basehdr = b' delta'
120 basehdr = b' delta'
121 else:
121 else:
122 basehdr = b' base'
122 basehdr = b' base'
123
123
124 if format == 0:
124 if format == 0:
125 ui.write(
125 ui.write(
126 (
126 (
127 b" rev offset length " + basehdr + b" linkrev"
127 b" rev offset length " + basehdr + b" linkrev"
128 b" nodeid p1 p2\n"
128 b" nodeid p1 p2\n"
129 )
129 )
130 )
130 )
131 elif format == 1:
131 elif format == 1:
132 ui.write(
132 ui.write(
133 (
133 (
134 b" rev flag offset length"
134 b" rev flag offset length"
135 b" size " + basehdr + b" link p1 p2"
135 b" size " + basehdr + b" link p1 p2"
136 b" nodeid\n"
136 b" nodeid\n"
137 )
137 )
138 )
138 )
139
139
140 for i in r:
140 for i in r:
141 node = r.node(i)
141 node = r.node(i)
142 if generaldelta:
142 if generaldelta:
143 base = r.deltaparent(i)
143 base = r.deltaparent(i)
144 else:
144 else:
145 base = r.chainbase(i)
145 base = r.chainbase(i)
146 if format == 0:
146 if format == 0:
147 try:
147 try:
148 pp = r.parents(node)
148 pp = r.parents(node)
149 except Exception:
149 except Exception:
150 pp = [nullid, nullid]
150 pp = [nullid, nullid]
151 ui.write(
151 ui.write(
152 b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
152 b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
153 % (
153 % (
154 i,
154 i,
155 r.start(i),
155 r.start(i),
156 r.length(i),
156 r.length(i),
157 base,
157 base,
158 r.linkrev(i),
158 r.linkrev(i),
159 short(node),
159 short(node),
160 short(pp[0]),
160 short(pp[0]),
161 short(pp[1]),
161 short(pp[1]),
162 )
162 )
163 )
163 )
164 elif format == 1:
164 elif format == 1:
165 pr = r.parentrevs(i)
165 pr = r.parentrevs(i)
166 ui.write(
166 ui.write(
167 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n"
167 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n"
168 % (
168 % (
169 i,
169 i,
170 r.flags(i),
170 r.flags(i),
171 r.start(i),
171 r.start(i),
172 r.length(i),
172 r.length(i),
173 r.rawsize(i),
173 r.rawsize(i),
174 base,
174 base,
175 r.linkrev(i),
175 r.linkrev(i),
176 pr[0],
176 pr[0],
177 pr[1],
177 pr[1],
178 short(node),
178 short(node),
179 )
179 )
180 )
180 )
181
181
182
182
183 def debugindexdot(orig, ui, repo, file_):
183 def debugindexdot(orig, ui, repo, file_):
184 """dump an index DAG as a graphviz dot file"""
184 """dump an index DAG as a graphviz dot file"""
185 if not shallowutil.isenabled(repo):
185 if not shallowutil.isenabled(repo):
186 return orig(ui, repo, file_)
186 return orig(ui, repo, file_)
187
187
188 r = buildtemprevlog(repo, os.path.basename(file_)[:-2])
188 r = buildtemprevlog(repo, os.path.basename(file_)[:-2])
189
189
190 ui.write(b"digraph G {\n")
190 ui.writenoi18n(b"digraph G {\n")
191 for i in r:
191 for i in r:
192 node = r.node(i)
192 node = r.node(i)
193 pp = r.parents(node)
193 pp = r.parents(node)
194 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
194 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
195 if pp[1] != nullid:
195 if pp[1] != nullid:
196 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
196 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
197 ui.write(b"}\n")
197 ui.write(b"}\n")
198
198
199
199
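# For a file with a linear three-revision history, the output looks
# roughly like this (parent rev -> child rev, -1 being the null revision):
#
#   digraph G {
#       -1 -> 0
#       0 -> 1
#       1 -> 2
#   }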
200 def verifyremotefilelog(ui, path, **opts):
200 def verifyremotefilelog(ui, path, **opts):
201 decompress = opts.get(r'decompress')
201 decompress = opts.get(r'decompress')
202
202
203 for root, dirs, files in os.walk(path):
203 for root, dirs, files in os.walk(path):
204 for file in files:
204 for file in files:
205 if file == b"repos":
205 if file == b"repos":
206 continue
206 continue
207 filepath = os.path.join(root, file)
207 filepath = os.path.join(root, file)
208 size, firstnode, mapping = parsefileblob(filepath, decompress)
208 size, firstnode, mapping = parsefileblob(filepath, decompress)
209 for p1, p2, linknode, copyfrom in mapping.itervalues():
209 for p1, p2, linknode, copyfrom in mapping.itervalues():
210 if linknode == nullid:
210 if linknode == nullid:
211 actualpath = os.path.relpath(root, path)
211 actualpath = os.path.relpath(root, path)
212 key = fileserverclient.getcachekey(
212 key = fileserverclient.getcachekey(
213 b"reponame", actualpath, file
213 b"reponame", actualpath, file
214 )
214 )
215 ui.status(
215 ui.status(
216 b"%s %s\n" % (key, os.path.relpath(filepath, path))
216 b"%s %s\n" % (key, os.path.relpath(filepath, path))
217 )
217 )
218
218
219
219
220 def _decompressblob(raw):
220 def _decompressblob(raw):
221 return zlib.decompress(raw)
221 return zlib.decompress(raw)
222
222
223
223
224 def parsefileblob(path, decompress):
224 def parsefileblob(path, decompress):
225 f = open(path, b"rb")
225 f = open(path, b"rb")
226 try:
226 try:
227 raw = f.read()
227 raw = f.read()
228 finally:
228 finally:
229 f.close()
229 f.close()
230
230
231 if decompress:
231 if decompress:
232 raw = _decompressblob(raw)
232 raw = _decompressblob(raw)
233
233
234 offset, size, flags = shallowutil.parsesizeflags(raw)
234 offset, size, flags = shallowutil.parsesizeflags(raw)
235 start = offset + size
235 start = offset + size
236
236
237 firstnode = None
237 firstnode = None
238
238
239 mapping = {}
239 mapping = {}
240 while start < len(raw):
240 while start < len(raw):
241 divider = raw.index(b'\0', start + 80)
241 divider = raw.index(b'\0', start + 80)
242
242
243 currentnode = raw[start : (start + 20)]
243 currentnode = raw[start : (start + 20)]
244 if not firstnode:
244 if not firstnode:
245 firstnode = currentnode
245 firstnode = currentnode
246
246
247 p1 = raw[(start + 20) : (start + 40)]
247 p1 = raw[(start + 20) : (start + 40)]
248 p2 = raw[(start + 40) : (start + 60)]
248 p2 = raw[(start + 40) : (start + 60)]
249 linknode = raw[(start + 60) : (start + 80)]
249 linknode = raw[(start + 60) : (start + 80)]
250 copyfrom = raw[(start + 80) : divider]
250 copyfrom = raw[(start + 80) : divider]
251
251
252 mapping[currentnode] = (p1, p2, linknode, copyfrom)
252 mapping[currentnode] = (p1, p2, linknode, copyfrom)
253 start = divider + 1
253 start = divider + 1
254
254
255 return size, firstnode, mapping
255 return size, firstnode, mapping
256
256
257
257
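# For reference, the blob layout parsefileblob walks: after the size
# header, each entry packs four 20-byte hashes followed by a NUL-terminated
# copyfrom path:
#
#   [node][p1][p2][linknode]copyfrom\0[node][p1][p2][linknode]copyfrom\0...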
258 def debugdatapack(ui, *paths, **opts):
258 def debugdatapack(ui, *paths, **opts):
259 for path in paths:
259 for path in paths:
260 if b'.data' in path:
260 if b'.data' in path:
261 path = path[: path.index(b'.data')]
261 path = path[: path.index(b'.data')]
262 ui.write(b"%s:\n" % path)
262 ui.write(b"%s:\n" % path)
263 dpack = datapack.datapack(path)
263 dpack = datapack.datapack(path)
264 node = opts.get(r'node')
264 node = opts.get(r'node')
265 if node:
265 if node:
266 deltachain = dpack.getdeltachain(b'', bin(node))
266 deltachain = dpack.getdeltachain(b'', bin(node))
267 dumpdeltachain(ui, deltachain, **opts)
267 dumpdeltachain(ui, deltachain, **opts)
268 return
268 return
269
269
270 if opts.get(r'long'):
270 if opts.get(r'long'):
271 hashformatter = hex
271 hashformatter = hex
272 hashlen = 42
272 hashlen = 42
273 else:
273 else:
274 hashformatter = short
274 hashformatter = short
275 hashlen = 14
275 hashlen = 14
276
276
277 lastfilename = None
277 lastfilename = None
278 totaldeltasize = 0
278 totaldeltasize = 0
279 totalblobsize = 0
279 totalblobsize = 0
280
280
281 def printtotals():
281 def printtotals():
282 if lastfilename is not None:
282 if lastfilename is not None:
283 ui.write(b"\n")
283 ui.write(b"\n")
284 if not totaldeltasize or not totalblobsize:
284 if not totaldeltasize or not totalblobsize:
285 return
285 return
286 difference = totalblobsize - totaldeltasize
286 difference = totalblobsize - totaldeltasize
287 deltastr = b"%0.1f%% %s" % (
287 deltastr = b"%0.1f%% %s" % (
288 (100.0 * abs(difference) / totalblobsize),
288 (100.0 * abs(difference) / totalblobsize),
289 (b"smaller" if difference > 0 else b"bigger"),
289 (b"smaller" if difference > 0 else b"bigger"),
290 )
290 )
291
291
292 ui.write(
292 ui.writenoi18n(
293 b"Total:%s%s %s (%s)\n"
293 b"Total:%s%s %s (%s)\n"
294 % (
294 % (
295 b"".ljust(2 * hashlen - len(b"Total:")),
295 b"".ljust(2 * hashlen - len(b"Total:")),
296 (b'%d' % totaldeltasize).ljust(12),
296 (b'%d' % totaldeltasize).ljust(12),
297 (b'%d' % totalblobsize).ljust(9),
297 (b'%d' % totalblobsize).ljust(9),
298 deltastr,
298 deltastr,
299 )
299 )
300 )
300 )
301
301
302 bases = {}
302 bases = {}
303 nodes = set()
303 nodes = set()
304 failures = 0
304 failures = 0
305 for filename, node, deltabase, deltalen in dpack.iterentries():
305 for filename, node, deltabase, deltalen in dpack.iterentries():
306 bases[node] = deltabase
306 bases[node] = deltabase
307 if node in nodes:
307 if node in nodes:
308 ui.write((b"Bad entry: %s appears twice\n" % short(node)))
308 ui.write((b"Bad entry: %s appears twice\n" % short(node)))
309 failures += 1
309 failures += 1
310 nodes.add(node)
310 nodes.add(node)
311 if filename != lastfilename:
311 if filename != lastfilename:
312 printtotals()
312 printtotals()
313 name = b'(empty name)' if filename == b'' else filename
313 name = b'(empty name)' if filename == b'' else filename
314 ui.write(b"%s:\n" % name)
314 ui.write(b"%s:\n" % name)
315 ui.write(
315 ui.write(
316 b"%s%s%s%s\n"
316 b"%s%s%s%s\n"
317 % (
317 % (
318 b"Node".ljust(hashlen),
318 b"Node".ljust(hashlen),
319 b"Delta Base".ljust(hashlen),
319 b"Delta Base".ljust(hashlen),
320 b"Delta Length".ljust(14),
320 b"Delta Length".ljust(14),
321 b"Blob Size".ljust(9),
321 b"Blob Size".ljust(9),
322 )
322 )
323 )
323 )
324 lastfilename = filename
324 lastfilename = filename
325 totalblobsize = 0
325 totalblobsize = 0
326 totaldeltasize = 0
326 totaldeltasize = 0
327
327
328 # Metadata could be missing, in which case it will be an empty dict.
328 # Metadata could be missing, in which case it will be an empty dict.
329 meta = dpack.getmeta(filename, node)
329 meta = dpack.getmeta(filename, node)
330 if constants.METAKEYSIZE in meta:
330 if constants.METAKEYSIZE in meta:
331 blobsize = meta[constants.METAKEYSIZE]
331 blobsize = meta[constants.METAKEYSIZE]
332 totaldeltasize += deltalen
332 totaldeltasize += deltalen
333 totalblobsize += blobsize
333 totalblobsize += blobsize
334 else:
334 else:
335 blobsize = b"(missing)"
335 blobsize = b"(missing)"
336 ui.write(
336 ui.write(
337 b"%s %s %s%s\n"
337 b"%s %s %s%s\n"
338 % (
338 % (
339 hashformatter(node),
339 hashformatter(node),
340 hashformatter(deltabase),
340 hashformatter(deltabase),
341 (b'%d' % deltalen).ljust(14),
341 (b'%d' % deltalen).ljust(14),
342 pycompat.bytestr(blobsize),
342 pycompat.bytestr(blobsize),
343 )
343 )
344 )
344 )
345
345
346 if filename is not None:
346 if filename is not None:
347 printtotals()
347 printtotals()
348
348
349 failures += _sanitycheck(ui, set(nodes), bases)
349 failures += _sanitycheck(ui, set(nodes), bases)
350 if failures > 1:
350 if failures > 1:
351 ui.warn((b"%d failures\n" % failures))
351 ui.warn((b"%d failures\n" % failures))
352 return 1
352 return 1
353
353
354
354
355 def _sanitycheck(ui, nodes, bases):
355 def _sanitycheck(ui, nodes, bases):
356 """
356 """
357 Does some basic sanity checking on a packfile with ``nodes`` and ``bases`` (a
357 Does some basic sanity checking on a packfile with ``nodes`` and ``bases`` (a
358 mapping of node->base):
358 mapping of node->base):
359
359
360 - Each deltabase must itself be a node elsewhere in the pack
360 - Each deltabase must itself be a node elsewhere in the pack
361 - There must be no cycles
361 - There must be no cycles
362 """
362 """
363 failures = 0
363 failures = 0
364 for node in nodes:
364 for node in nodes:
365 seen = set()
365 seen = set()
366 current = node
366 current = node
367 deltabase = bases[current]
367 deltabase = bases[current]
368
368
369 while deltabase != nullid:
369 while deltabase != nullid:
370 if deltabase not in nodes:
370 if deltabase not in nodes:
371 ui.warn(
371 ui.warn(
372 (
372 (
373 b"Bad entry: %s has an unknown deltabase (%s)\n"
373 b"Bad entry: %s has an unknown deltabase (%s)\n"
374 % (short(node), short(deltabase))
374 % (short(node), short(deltabase))
375 )
375 )
376 )
376 )
377 failures += 1
377 failures += 1
378 break
378 break
379
379
380 if deltabase in seen:
380 if deltabase in seen:
381 ui.warn(
381 ui.warn(
382 (
382 (
383 b"Bad entry: %s has a cycle (at %s)\n"
383 b"Bad entry: %s has a cycle (at %s)\n"
384 % (short(node), short(deltabase))
384 % (short(node), short(deltabase))
385 )
385 )
386 )
386 )
387 failures += 1
387 failures += 1
388 break
388 break
389
389
390 current = deltabase
390 current = deltabase
391 seen.add(current)
391 seen.add(current)
392 deltabase = bases[current]
392 deltabase = bases[current]
393 # Since ``node`` begins a valid chain, reset/memoize its base to nullid
393 # Since ``node`` begins a valid chain, reset/memoize its base to nullid
394 # so we don't traverse it again.
394 # so we don't traverse it again.
395 bases[node] = nullid
395 bases[node] = nullid
396 return failures
396 return failures
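A minimal sketch of the checks documented in the docstring above, using hypothetical 20-byte node values and a stub ui (illustration only, not part of this changeset):

    class fakeui(object):
        def warn(self, msg):
            print(msg)

    nullid = b"\0" * 20                      # the null node, as in mercurial.node
    a, b, c = b"\x01" * 20, b"\x02" * 20, b"\x03" * 20
    bases = {a: nullid, b: c, c: b}          # b and c delta off each other: a cycle
    failures = _sanitycheck(fakeui(), {a, b, c}, bases)
    assert failures > 0                      # the b<->c cycle is reported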
397
397
398
398
399 def dumpdeltachain(ui, deltachain, **opts):
399 def dumpdeltachain(ui, deltachain, **opts):
400 hashformatter = hex
400 hashformatter = hex
401 hashlen = 40
401 hashlen = 40
402
402
403 lastfilename = None
403 lastfilename = None
404 for filename, node, filename, deltabasenode, delta in deltachain:
404 for filename, node, filename, deltabasenode, delta in deltachain:
405 if filename != lastfilename:
405 if filename != lastfilename:
406 ui.write(b"\n%s\n" % filename)
406 ui.write(b"\n%s\n" % filename)
407 lastfilename = filename
407 lastfilename = filename
408 ui.write(
408 ui.write(
409 b"%s %s %s %s\n"
409 b"%s %s %s %s\n"
410 % (
410 % (
411 b"Node".ljust(hashlen),
411 b"Node".ljust(hashlen),
412 b"Delta Base".ljust(hashlen),
412 b"Delta Base".ljust(hashlen),
413 b"Delta SHA1".ljust(hashlen),
413 b"Delta SHA1".ljust(hashlen),
414 b"Delta Length".ljust(6),
414 b"Delta Length".ljust(6),
415 )
415 )
416 )
416 )
417
417
418 ui.write(
418 ui.write(
419 b"%s %s %s %d\n"
419 b"%s %s %s %d\n"
420 % (
420 % (
421 hashformatter(node),
421 hashformatter(node),
422 hashformatter(deltabasenode),
422 hashformatter(deltabasenode),
423 nodemod.hex(hashlib.sha1(delta).digest()),
423 nodemod.hex(hashlib.sha1(delta).digest()),
424 len(delta),
424 len(delta),
425 )
425 )
426 )
426 )
427
427
428
428
429 def debughistorypack(ui, path):
429 def debughistorypack(ui, path):
430 if b'.hist' in path:
430 if b'.hist' in path:
431 path = path[: path.index(b'.hist')]
431 path = path[: path.index(b'.hist')]
432 hpack = historypack.historypack(path)
432 hpack = historypack.historypack(path)
433
433
434 lastfilename = None
434 lastfilename = None
435 for entry in hpack.iterentries():
435 for entry in hpack.iterentries():
436 filename, node, p1node, p2node, linknode, copyfrom = entry
436 filename, node, p1node, p2node, linknode, copyfrom = entry
437 if filename != lastfilename:
437 if filename != lastfilename:
438 ui.write(b"\n%s\n" % filename)
438 ui.write(b"\n%s\n" % filename)
439 ui.write(
439 ui.write(
440 b"%s%s%s%s%s\n"
440 b"%s%s%s%s%s\n"
441 % (
441 % (
442 b"Node".ljust(14),
442 b"Node".ljust(14),
443 b"P1 Node".ljust(14),
443 b"P1 Node".ljust(14),
444 b"P2 Node".ljust(14),
444 b"P2 Node".ljust(14),
445 b"Link Node".ljust(14),
445 b"Link Node".ljust(14),
446 b"Copy From",
446 b"Copy From",
447 )
447 )
448 )
448 )
449 lastfilename = filename
449 lastfilename = filename
450 ui.write(
450 ui.write(
451 b"%s %s %s %s %s\n"
451 b"%s %s %s %s %s\n"
452 % (
452 % (
453 short(node),
453 short(node),
454 short(p1node),
454 short(p1node),
455 short(p2node),
455 short(p2node),
456 short(linknode),
456 short(linknode),
457 copyfrom,
457 copyfrom,
458 )
458 )
459 )
459 )
460
460
461
461
462 def debugwaitonrepack(repo):
462 def debugwaitonrepack(repo):
463 with lockmod.lock(repack.repacklockvfs(repo), b"repacklock", timeout=-1):
463 with lockmod.lock(repack.repacklockvfs(repo), b"repacklock", timeout=-1):
464 return
464 return
465
465
466
466
467 def debugwaitonprefetch(repo):
467 def debugwaitonprefetch(repo):
468 with repo._lock(
468 with repo._lock(
469 repo.svfs,
469 repo.svfs,
470 b"prefetchlock",
470 b"prefetchlock",
471 True,
471 True,
472 None,
472 None,
473 None,
473 None,
474 _(b'prefetching in %s') % repo.origroot,
474 _(b'prefetching in %s') % repo.origroot,
475 ):
475 ):
476 pass
476 pass
@@ -1,530 +1,530 @@
1 # show.py - Extension implementing `hg show`
1 # show.py - Extension implementing `hg show`
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """unified command to show various repository information (EXPERIMENTAL)
8 """unified command to show various repository information (EXPERIMENTAL)
9
9
10 This extension provides the :hg:`show` command, which offers a central
10 This extension provides the :hg:`show` command, which offers a central
11 command for displaying commonly-accessed repository data and views of that
11 command for displaying commonly-accessed repository data and views of that
12 data.
12 data.
13
13
14 The following config options can influence operation.
14 The following config options can influence operation.
15
15
16 ``commands``
16 ``commands``
17 ------------
17 ------------
18
18
19 ``show.aliasprefix``
19 ``show.aliasprefix``
20 List of strings that will register aliases for views. For example, ``s``
20 List of strings that will register aliases for views. For example, ``s``
21 will effectively set config options ``alias.s<view> = show <view>`` for
21 will effectively set config options ``alias.s<view> = show <view>`` for
22 all views, so that `hg swork` executes `hg show work` (see the example below).
22 all views, so that `hg swork` executes `hg show work` (see the example below).
23
23
24 Aliases that would conflict with existing registrations will not be
24 Aliases that would conflict with existing registrations will not be
25 performed.
25 performed.
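For example, the following (hypothetical) configuration registers an
``s``-prefixed alias for every view::

    [commands]
    show.aliasprefix = s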
26 """
26 """
27
27
28 from __future__ import absolute_import
28 from __future__ import absolute_import
29
29
30 from mercurial.i18n import _
30 from mercurial.i18n import _
31 from mercurial.node import nullrev
31 from mercurial.node import nullrev
32 from mercurial import (
32 from mercurial import (
33 cmdutil,
33 cmdutil,
34 commands,
34 commands,
35 destutil,
35 destutil,
36 error,
36 error,
37 formatter,
37 formatter,
38 graphmod,
38 graphmod,
39 logcmdutil,
39 logcmdutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 registrar,
42 registrar,
43 revset,
43 revset,
44 revsetlang,
44 revsetlang,
45 scmutil,
45 scmutil,
46 )
46 )
47
47
48 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
48 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
49 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
49 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
50 # be specifying the version(s) of Mercurial they are tested with, or
50 # be specifying the version(s) of Mercurial they are tested with, or
51 # leave the attribute unspecified.
51 # leave the attribute unspecified.
52 testedwith = b'ships-with-hg-core'
52 testedwith = b'ships-with-hg-core'
53
53
54 cmdtable = {}
54 cmdtable = {}
55 command = registrar.command(cmdtable)
55 command = registrar.command(cmdtable)
56
56
57 revsetpredicate = registrar.revsetpredicate()
57 revsetpredicate = registrar.revsetpredicate()
58
58
59
59
60 class showcmdfunc(registrar._funcregistrarbase):
60 class showcmdfunc(registrar._funcregistrarbase):
61 """Register a function to be invoked for an `hg show <thing>`."""
61 """Register a function to be invoked for an `hg show <thing>`."""
62
62
63 # Used by _formatdoc().
63 # Used by _formatdoc().
64 _docformat = b'%s -- %s'
64 _docformat = b'%s -- %s'
65
65
66 def _extrasetup(self, name, func, fmtopic=None, csettopic=None):
66 def _extrasetup(self, name, func, fmtopic=None, csettopic=None):
67 """Called with decorator arguments to register a show view.
67 """Called with decorator arguments to register a show view.
68
68
69 ``name`` is the sub-command name.
69 ``name`` is the sub-command name.
70
70
71 ``func`` is the function being decorated.
71 ``func`` is the function being decorated.
72
72
73 ``fmtopic`` is the topic in the style that will be rendered for
73 ``fmtopic`` is the topic in the style that will be rendered for
74 this view.
74 this view.
75
75
76 ``csettopic`` is the topic in the style to be used for a changeset
76 ``csettopic`` is the topic in the style to be used for a changeset
77 printer.
77 printer.
78
78
79 If ``fmtopic`` is specified, the view function will receive a
79 If ``fmtopic`` is specified, the view function will receive a
80 formatter instance. If ``csettopic`` is specified, the view
80 formatter instance. If ``csettopic`` is specified, the view
81 function will receive a changeset printer.
81 function will receive a changeset printer.
82 """
82 """
83 func._fmtopic = fmtopic
83 func._fmtopic = fmtopic
84 func._csettopic = csettopic
84 func._csettopic = csettopic
85
85
86
86
87 showview = showcmdfunc()
87 showview = showcmdfunc()
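A hedged sketch of the registration contract described in ``_extrasetup`` above (the view name and body are hypothetical, not views shipped by this extension):

    @showview(b'example', fmtopic=b'example')
    def showexample(ui, repo, fm):
        """example data (hypothetical view)"""
        fm.startitem()
        fm.write(b'root', b'%s\n', repo.root)  # receives a formatter via fmtopic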
88
88
89
89
90 @command(
90 @command(
91 b'show',
91 b'show',
92 [
92 [
93 # TODO: Switch this template flag to use cmdutil.formatteropts if
93 # TODO: Switch this template flag to use cmdutil.formatteropts if
94 # 'hg show' becomes stable before --template/-T is stable. For now,
94 # 'hg show' becomes stable before --template/-T is stable. For now,
95 # we are putting it here without the '(EXPERIMENTAL)' flag because it
95 # we are putting it here without the '(EXPERIMENTAL)' flag because it
96 # is an important part of the 'hg show' user experience and the entire
96 # is an important part of the 'hg show' user experience and the entire
97 # 'hg show' experience is experimental.
97 # 'hg show' experience is experimental.
98 (b'T', b'template', b'', b'display with template', _(b'TEMPLATE')),
98 (b'T', b'template', b'', b'display with template', _(b'TEMPLATE')),
99 ],
99 ],
100 _(b'VIEW'),
100 _(b'VIEW'),
101 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
101 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
102 )
102 )
103 def show(ui, repo, view=None, template=None):
103 def show(ui, repo, view=None, template=None):
104 """show various repository information
104 """show various repository information
105
105
106 A requested view of repository data is displayed.
106 A requested view of repository data is displayed.
107
107
108 If no view is requested, the list of available views is shown and the
108 If no view is requested, the list of available views is shown and the
109 command aborts.
109 command aborts.
110
110
111 .. note::
111 .. note::
112
112
113 There are no backwards compatibility guarantees for the output of this
113 There are no backwards compatibility guarantees for the output of this
114 command. Output may change in any future Mercurial release.
114 command. Output may change in any future Mercurial release.
115
115
116 Consumers wanting stable command output should specify a template via
116 Consumers wanting stable command output should specify a template via
117 ``-T/--template``.
117 ``-T/--template``.
118
118
119 List of available views:
119 List of available views:
120 """
120 """
121 if ui.plain() and not template:
121 if ui.plain() and not template:
122 hint = _(b'invoke with -T/--template to control output format')
122 hint = _(b'invoke with -T/--template to control output format')
123 raise error.Abort(
123 raise error.Abort(
124 _(b'must specify a template in plain mode'), hint=hint
124 _(b'must specify a template in plain mode'), hint=hint
125 )
125 )
126
126
127 views = showview._table
127 views = showview._table
128
128
129 if not view:
129 if not view:
130 ui.pager(b'show')
130 ui.pager(b'show')
131 # TODO consider using formatter here so available views can be
131 # TODO consider using formatter here so available views can be
132 # rendered to custom format.
132 # rendered to custom format.
133 ui.write(_(b'available views:\n'))
133 ui.write(_(b'available views:\n'))
134 ui.write(b'\n')
134 ui.write(b'\n')
135
135
136 for name, func in sorted(views.items()):
136 for name, func in sorted(views.items()):
137 ui.write(b'%s\n' % pycompat.sysbytes(func.__doc__))
137 ui.write(b'%s\n' % pycompat.sysbytes(func.__doc__))
138
138
139 ui.write(b'\n')
139 ui.write(b'\n')
140 raise error.Abort(
140 raise error.Abort(
141 _(b'no view requested'),
141 _(b'no view requested'),
142 hint=_(b'use "hg show VIEW" to choose a view'),
142 hint=_(b'use "hg show VIEW" to choose a view'),
143 )
143 )
144
144
145 # TODO use same logic as dispatch to perform prefix matching.
145 # TODO use same logic as dispatch to perform prefix matching.
146 if view not in views:
146 if view not in views:
147 raise error.Abort(
147 raise error.Abort(
148 _(b'unknown view: %s') % view,
148 _(b'unknown view: %s') % view,
149 hint=_(b'run "hg show" to see available views'),
149 hint=_(b'run "hg show" to see available views'),
150 )
150 )
151
151
152 template = template or b'show'
152 template = template or b'show'
153
153
154 fn = views[view]
154 fn = views[view]
155 ui.pager(b'show')
155 ui.pager(b'show')
156
156
157 if fn._fmtopic:
157 if fn._fmtopic:
158 fmtopic = b'show%s' % fn._fmtopic
158 fmtopic = b'show%s' % fn._fmtopic
159 with ui.formatter(fmtopic, {b'template': template}) as fm:
159 with ui.formatter(fmtopic, {b'template': template}) as fm:
160 return fn(ui, repo, fm)
160 return fn(ui, repo, fm)
161 elif fn._csettopic:
161 elif fn._csettopic:
162 ref = b'show%s' % fn._csettopic
162 ref = b'show%s' % fn._csettopic
163 spec = formatter.lookuptemplate(ui, ref, template)
163 spec = formatter.lookuptemplate(ui, ref, template)
164 displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
164 displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
165 return fn(ui, repo, displayer)
165 return fn(ui, repo, displayer)
166 else:
166 else:
167 return fn(ui, repo)
167 return fn(ui, repo)
168
168
169
169
170 @showview(b'bookmarks', fmtopic=b'bookmarks')
170 @showview(b'bookmarks', fmtopic=b'bookmarks')
171 def showbookmarks(ui, repo, fm):
171 def showbookmarks(ui, repo, fm):
172 """bookmarks and their associated changeset"""
172 """bookmarks and their associated changeset"""
173 marks = repo._bookmarks
173 marks = repo._bookmarks
174 if not len(marks):
174 if not len(marks):
175 # This is a bit hacky. Ideally, templates would have a way to
175 # This is a bit hacky. Ideally, templates would have a way to
176 # specify an empty output, but we shouldn't corrupt JSON while
176 # specify an empty output, but we shouldn't corrupt JSON while
177 # waiting for this functionality.
177 # waiting for this functionality.
178 if not isinstance(fm, formatter.jsonformatter):
178 if not isinstance(fm, formatter.jsonformatter):
179 ui.write(_(b'(no bookmarks set)\n'))
179 ui.write(_(b'(no bookmarks set)\n'))
180 return
180 return
181
181
182 revs = [repo[node].rev() for node in marks.values()]
182 revs = [repo[node].rev() for node in marks.values()]
183 active = repo._activebookmark
183 active = repo._activebookmark
184 longestname = max(len(b) for b in marks)
184 longestname = max(len(b) for b in marks)
185 nodelen = longestshortest(repo, revs)
185 nodelen = longestshortest(repo, revs)
186
186
187 for bm, node in sorted(marks.items()):
187 for bm, node in sorted(marks.items()):
188 fm.startitem()
188 fm.startitem()
189 fm.context(ctx=repo[node])
189 fm.context(ctx=repo[node])
190 fm.write(b'bookmark', b'%s', bm)
190 fm.write(b'bookmark', b'%s', bm)
191 fm.write(b'node', fm.hexfunc(node), fm.hexfunc(node))
191 fm.write(b'node', fm.hexfunc(node), fm.hexfunc(node))
192 fm.data(
192 fm.data(
193 active=bm == active, longestbookmarklen=longestname, nodelen=nodelen
193 active=bm == active, longestbookmarklen=longestname, nodelen=nodelen
194 )
194 )
195
195
196
196
197 @showview(b'stack', csettopic=b'stack')
197 @showview(b'stack', csettopic=b'stack')
198 def showstack(ui, repo, displayer):
198 def showstack(ui, repo, displayer):
199 """current line of work"""
199 """current line of work"""
200 wdirctx = repo[b'.']
200 wdirctx = repo[b'.']
201 if wdirctx.rev() == nullrev:
201 if wdirctx.rev() == nullrev:
202 raise error.Abort(
202 raise error.Abort(
203 _(
203 _(
204 b'stack view only available when there is a '
204 b'stack view only available when there is a '
205 b'working directory'
205 b'working directory'
206 )
206 )
207 )
207 )
208
208
209 if wdirctx.phase() == phases.public:
209 if wdirctx.phase() == phases.public:
210 ui.write(
210 ui.write(
211 _(
211 _(
212 b'(empty stack; working directory parent is a published '
212 b'(empty stack; working directory parent is a published '
213 b'changeset)\n'
213 b'changeset)\n'
214 )
214 )
215 )
215 )
216 return
216 return
217
217
218 # TODO extract "find stack" into a function to facilitate
218 # TODO extract "find stack" into a function to facilitate
219 # customization and reuse.
219 # customization and reuse.
220
220
221 baserev = destutil.stackbase(ui, repo)
221 baserev = destutil.stackbase(ui, repo)
222 basectx = None
222 basectx = None
223
223
224 if baserev is None:
224 if baserev is None:
225 baserev = wdirctx.rev()
225 baserev = wdirctx.rev()
226 stackrevs = {wdirctx.rev()}
226 stackrevs = {wdirctx.rev()}
227 else:
227 else:
228 stackrevs = set(repo.revs(b'%d::.', baserev))
228 stackrevs = set(repo.revs(b'%d::.', baserev))
229
229
230 ctx = repo[baserev]
230 ctx = repo[baserev]
231 if ctx.p1().rev() != nullrev:
231 if ctx.p1().rev() != nullrev:
232 basectx = ctx.p1()
232 basectx = ctx.p1()
233
233
234 # And relevant descendants.
234 # And relevant descendants.
235 branchpointattip = False
235 branchpointattip = False
236 cl = repo.changelog
236 cl = repo.changelog
237
237
238 for rev in cl.descendants([wdirctx.rev()]):
238 for rev in cl.descendants([wdirctx.rev()]):
239 ctx = repo[rev]
239 ctx = repo[rev]
240
240
241 # Will only happen if . is public.
241 # Will only happen if . is public.
242 if ctx.phase() == phases.public:
242 if ctx.phase() == phases.public:
243 break
243 break
244
244
245 stackrevs.add(ctx.rev())
245 stackrevs.add(ctx.rev())
246
246
247 # ctx.children() within a function iterating on descendants
247 # ctx.children() within a function iterating on descendants
248 # potentially has severe performance concerns because revlog.children()
248 # potentially has severe performance concerns because revlog.children()
249 # iterates over all revisions after ctx's node. However, the number of
249 # iterates over all revisions after ctx's node. However, the number of
250 # draft changesets should be a reasonably small number. So even if
250 # draft changesets should be a reasonably small number. So even if
251 # this is quadratic, the perf impact should be minimal.
251 # this is quadratic, the perf impact should be minimal.
252 if len(ctx.children()) > 1:
252 if len(ctx.children()) > 1:
253 branchpointattip = True
253 branchpointattip = True
254 break
254 break
255
255
256 stackrevs = sorted(stackrevs, reverse=True)
256 stackrevs = sorted(stackrevs, reverse=True)
257
257
258 # Find likely target heads for the current stack. These are likely
258 # Find likely target heads for the current stack. These are likely
259 # merge or rebase targets.
259 # merge or rebase targets.
260 if basectx:
260 if basectx:
261 # TODO make this customizable?
261 # TODO make this customizable?
262 newheads = set(
262 newheads = set(
263 repo.revs(
263 repo.revs(
264 b'heads(%d::) - %ld - not public()', basectx.rev(), stackrevs
264 b'heads(%d::) - %ld - not public()', basectx.rev(), stackrevs
265 )
265 )
266 )
266 )
267 else:
267 else:
268 newheads = set()
268 newheads = set()
269
269
270 allrevs = set(stackrevs) | newheads | {baserev}
270 allrevs = set(stackrevs) | newheads | {baserev}
271 nodelen = longestshortest(repo, allrevs)
271 nodelen = longestshortest(repo, allrevs)
272
272
273 try:
273 try:
274 cmdutil.findcmd(b'rebase', commands.table)
274 cmdutil.findcmd(b'rebase', commands.table)
275 haverebase = True
275 haverebase = True
276 except (error.AmbiguousCommand, error.UnknownCommand):
276 except (error.AmbiguousCommand, error.UnknownCommand):
277 haverebase = False
277 haverebase = False
278
278
279 # TODO use templating.
279 # TODO use templating.
280 # TODO consider using graphmod. But it may not be necessary given
280 # TODO consider using graphmod. But it may not be necessary given
281 # our simplicity and the customizations required.
281 # our simplicity and the customizations required.
282 # TODO use proper graph symbols from graphmod
282 # TODO use proper graph symbols from graphmod
283
283
284 tres = formatter.templateresources(ui, repo)
284 tres = formatter.templateresources(ui, repo)
285 shortesttmpl = formatter.maketemplater(
285 shortesttmpl = formatter.maketemplater(
286 ui, b'{shortest(node, %d)}' % nodelen, resources=tres
286 ui, b'{shortest(node, %d)}' % nodelen, resources=tres
287 )
287 )
288
288
289 def shortest(ctx):
289 def shortest(ctx):
290 return shortesttmpl.renderdefault({b'ctx': ctx, b'node': ctx.hex()})
290 return shortesttmpl.renderdefault({b'ctx': ctx, b'node': ctx.hex()})
291
291
292 # We write out new heads to aid in DAG awareness and to help with decision
292 # We write out new heads to aid in DAG awareness and to help with decision
293 # making on how the stack should be reconciled with commits made since the
293 # making on how the stack should be reconciled with commits made since the
294 # branch point.
294 # branch point.
295 if newheads:
295 if newheads:
296 # Calculate distance from base so we can render the count and so we can
296 # Calculate distance from base so we can render the count and so we can
297 # sort display order by commit distance.
297 # sort display order by commit distance.
298 revdistance = {}
298 revdistance = {}
299 for head in newheads:
299 for head in newheads:
300 # There is some redundancy in DAG traversal here and therefore
300 # There is some redundancy in DAG traversal here and therefore
301 # room to optimize.
301 # room to optimize.
302 ancestors = cl.ancestors([head], stoprev=basectx.rev())
302 ancestors = cl.ancestors([head], stoprev=basectx.rev())
303 revdistance[head] = len(list(ancestors))
303 revdistance[head] = len(list(ancestors))
304
304
305 sourcectx = repo[stackrevs[-1]]
305 sourcectx = repo[stackrevs[-1]]
306
306
307 sortedheads = sorted(
307 sortedheads = sorted(
308 newheads, key=lambda x: revdistance[x], reverse=True
308 newheads, key=lambda x: revdistance[x], reverse=True
309 )
309 )
310
310
311 for i, rev in enumerate(sortedheads):
311 for i, rev in enumerate(sortedheads):
312 ctx = repo[rev]
312 ctx = repo[rev]
313
313
314 if i:
314 if i:
315 ui.write(b': ')
315 ui.write(b': ')
316 else:
316 else:
317 ui.write(b' ')
317 ui.write(b' ')
318
318
319 ui.write(b'o ')
319 ui.writenoi18n(b'o ')
320 displayer.show(ctx, nodelen=nodelen)
320 displayer.show(ctx, nodelen=nodelen)
321 displayer.flush(ctx)
321 displayer.flush(ctx)
322 ui.write(b'\n')
322 ui.write(b'\n')
323
323
324 if i:
324 if i:
325 ui.write(b':/')
325 ui.write(b':/')
326 else:
326 else:
327 ui.write(b' /')
327 ui.write(b' /')
328
328
329 ui.write(b' (')
329 ui.write(b' (')
330 ui.write(
330 ui.write(
331 _(b'%d commits ahead') % revdistance[rev],
331 _(b'%d commits ahead') % revdistance[rev],
332 label=b'stack.commitdistance',
332 label=b'stack.commitdistance',
333 )
333 )
334
334
335 if haverebase:
335 if haverebase:
336 # TODO may be able to omit --source in some scenarios
336 # TODO may be able to omit --source in some scenarios
337 ui.write(b'; ')
337 ui.write(b'; ')
338 ui.write(
338 ui.write(
339 (
339 (
340 b'hg rebase --source %s --dest %s'
340 b'hg rebase --source %s --dest %s'
341 % (shortest(sourcectx), shortest(ctx))
341 % (shortest(sourcectx), shortest(ctx))
342 ),
342 ),
343 label=b'stack.rebasehint',
343 label=b'stack.rebasehint',
344 )
344 )
345
345
346 ui.write(b')\n')
346 ui.write(b')\n')
347
347
348 ui.write(b':\n: ')
348 ui.write(b':\n: ')
349 ui.write(_(b'(stack head)\n'), label=b'stack.label')
349 ui.write(_(b'(stack head)\n'), label=b'stack.label')
350
350
351 if branchpointattip:
351 if branchpointattip:
352 ui.write(b' \\ / ')
352 ui.write(b' \\ / ')
353 ui.write(_(b'(multiple children)\n'), label=b'stack.label')
353 ui.write(_(b'(multiple children)\n'), label=b'stack.label')
354 ui.write(b' |\n')
354 ui.write(b' |\n')
355
355
356 for rev in stackrevs:
356 for rev in stackrevs:
357 ctx = repo[rev]
357 ctx = repo[rev]
358 symbol = b'@' if rev == wdirctx.rev() else b'o'
358 symbol = b'@' if rev == wdirctx.rev() else b'o'
359
359
360 if newheads:
360 if newheads:
361 ui.write(b': ')
361 ui.write(b': ')
362 else:
362 else:
363 ui.write(b' ')
363 ui.write(b' ')
364
364
365 ui.write(symbol, b' ')
365 ui.write(symbol, b' ')
366 displayer.show(ctx, nodelen=nodelen)
366 displayer.show(ctx, nodelen=nodelen)
367 displayer.flush(ctx)
367 displayer.flush(ctx)
368 ui.write(b'\n')
368 ui.write(b'\n')
369
369
370 # TODO display histedit hint?
370 # TODO display histedit hint?
371
371
372 if basectx:
372 if basectx:
373 # Vertically and horizontally separate stack base from parent
373 # Vertically and horizontally separate stack base from parent
374 # to reinforce stack boundary.
374 # to reinforce stack boundary.
375 if newheads:
375 if newheads:
376 ui.write(b':/ ')
376 ui.write(b':/ ')
377 else:
377 else:
378 ui.write(b' / ')
378 ui.write(b' / ')
379
379
380 ui.write(_(b'(stack base)'), b'\n', label=b'stack.label')
380 ui.write(_(b'(stack base)'), b'\n', label=b'stack.label')
381 ui.write(b'o ')
381 ui.writenoi18n(b'o ')
382
382
383 displayer.show(basectx, nodelen=nodelen)
383 displayer.show(basectx, nodelen=nodelen)
384 displayer.flush(basectx)
384 displayer.flush(basectx)
385 ui.write(b'\n')
385 ui.write(b'\n')
386
386
387
387
388 @revsetpredicate(b'_underway([commitage[, headage]])')
388 @revsetpredicate(b'_underway([commitage[, headage]])')
389 def underwayrevset(repo, subset, x):
389 def underwayrevset(repo, subset, x):
390 args = revset.getargsdict(x, b'underway', b'commitage headage')
390 args = revset.getargsdict(x, b'underway', b'commitage headage')
391 if b'commitage' not in args:
391 if b'commitage' not in args:
392 args[b'commitage'] = None
392 args[b'commitage'] = None
393 if b'headage' not in args:
393 if b'headage' not in args:
394 args[b'headage'] = None
394 args[b'headage'] = None
395
395
396 # We assume callers of this revset add a topological sort on the
396 # We assume callers of this revset add a topological sort on the
397 # result. This means there is no benefit to making the revset lazy
397 # result. This means there is no benefit to making the revset lazy
398 # since the topological sort needs to consume all revs.
398 # since the topological sort needs to consume all revs.
399 #
399 #
400 # With this in mind, we build up the set manually instead of constructing
400 # With this in mind, we build up the set manually instead of constructing
401 # a complex revset. This enables faster execution.
401 # a complex revset. This enables faster execution.
402
402
403 # Mutable changesets (non-public) are the most important changesets
403 # Mutable changesets (non-public) are the most important changesets
404 # to return. ``not public()`` will also pull in obsolete changesets if
404 # to return. ``not public()`` will also pull in obsolete changesets if
405 # there is a non-obsolete changeset with obsolete ancestors. This is
405 # there is a non-obsolete changeset with obsolete ancestors. This is
406 # why we exclude obsolete changesets from this query.
406 # why we exclude obsolete changesets from this query.
407 rs = b'not public() and not obsolete()'
407 rs = b'not public() and not obsolete()'
408 rsargs = []
408 rsargs = []
409 if args[b'commitage']:
409 if args[b'commitage']:
410 rs += b' and date(%s)'
410 rs += b' and date(%s)'
411 rsargs.append(
411 rsargs.append(
412 revsetlang.getstring(
412 revsetlang.getstring(
413 args[b'commitage'], _(b'commitage requires a string')
413 args[b'commitage'], _(b'commitage requires a string')
414 )
414 )
415 )
415 )
416
416
417 mutable = repo.revs(rs, *rsargs)
417 mutable = repo.revs(rs, *rsargs)
418 relevant = revset.baseset(mutable)
418 relevant = revset.baseset(mutable)
419
419
420 # Add parents of mutable changesets to provide context.
420 # Add parents of mutable changesets to provide context.
421 relevant += repo.revs(b'parents(%ld)', mutable)
421 relevant += repo.revs(b'parents(%ld)', mutable)
422
422
423 # We also pull in (public) heads if they a) aren't closing a branch
423 # We also pull in (public) heads if they a) aren't closing a branch
424 # and b) are recent.
424 # and b) are recent.
425 rs = b'head() and not closed()'
425 rs = b'head() and not closed()'
426 rsargs = []
426 rsargs = []
427 if args[b'headage']:
427 if args[b'headage']:
428 rs += b' and date(%s)'
428 rs += b' and date(%s)'
429 rsargs.append(
429 rsargs.append(
430 revsetlang.getstring(
430 revsetlang.getstring(
431 args[b'headage'], _(b'headage requires a string')
431 args[b'headage'], _(b'headage requires a string')
432 )
432 )
433 )
433 )
434
434
435 relevant += repo.revs(rs, *rsargs)
435 relevant += repo.revs(rs, *rsargs)
436
436
437 # Add working directory parent.
437 # Add working directory parent.
438 wdirrev = repo[b'.'].rev()
438 wdirrev = repo[b'.'].rev()
439 if wdirrev != nullrev:
439 if wdirrev != nullrev:
440 relevant += revset.baseset({wdirrev})
440 relevant += revset.baseset({wdirrev})
441
441
442 return subset & relevant
442 return subset & relevant
443
443
444
444
445 @showview(b'work', csettopic=b'work')
445 @showview(b'work', csettopic=b'work')
446 def showwork(ui, repo, displayer):
446 def showwork(ui, repo, displayer):
447 """changesets that aren't finished"""
447 """changesets that aren't finished"""
448 # TODO support date-based limiting when calling revset.
448 # TODO support date-based limiting when calling revset.
449 revs = repo.revs(b'sort(_underway(), topo)')
449 revs = repo.revs(b'sort(_underway(), topo)')
450 nodelen = longestshortest(repo, revs)
450 nodelen = longestshortest(repo, revs)
451
451
452 revdag = graphmod.dagwalker(repo, revs)
452 revdag = graphmod.dagwalker(repo, revs)
453
453
454 ui.setconfig(b'experimental', b'graphshorten', True)
454 ui.setconfig(b'experimental', b'graphshorten', True)
455 logcmdutil.displaygraph(
455 logcmdutil.displaygraph(
456 ui,
456 ui,
457 repo,
457 repo,
458 revdag,
458 revdag,
459 displayer,
459 displayer,
460 graphmod.asciiedges,
460 graphmod.asciiedges,
461 props={b'nodelen': nodelen},
461 props={b'nodelen': nodelen},
462 )
462 )
463
463
464
464
465 def extsetup(ui):
465 def extsetup(ui):
466 # Alias `hg <prefix><view>` to `hg show <view>`.
466 # Alias `hg <prefix><view>` to `hg show <view>`.
467 for prefix in ui.configlist(b'commands', b'show.aliasprefix'):
467 for prefix in ui.configlist(b'commands', b'show.aliasprefix'):
468 for view in showview._table:
468 for view in showview._table:
469 name = b'%s%s' % (prefix, view)
469 name = b'%s%s' % (prefix, view)
470
470
471 choice, allcommands = cmdutil.findpossible(
471 choice, allcommands = cmdutil.findpossible(
472 name, commands.table, strict=True
472 name, commands.table, strict=True
473 )
473 )
474
474
475 # This alias is already a command name. Don't set it.
475 # This alias is already a command name. Don't set it.
476 if name in choice:
476 if name in choice:
477 continue
477 continue
478
478
479 # Same for aliases.
479 # Same for aliases.
480 if ui.config(b'alias', name, None):
480 if ui.config(b'alias', name, None):
481 continue
481 continue
482
482
483 ui.setconfig(b'alias', name, b'show %s' % view, source=b'show')
483 ui.setconfig(b'alias', name, b'show %s' % view, source=b'show')
484
484
485
485
486 def longestshortest(repo, revs, minlen=4):
486 def longestshortest(repo, revs, minlen=4):
487 """Return the length of the longest shortest node to identify revisions.
487 """Return the length of the longest shortest node to identify revisions.
488
488
489 The result of this function can be used with the ``shortest()`` template
489 The result of this function can be used with the ``shortest()`` template
490 function to ensure that a value is unique and unambiguous for a given
490 function to ensure that a value is unique and unambiguous for a given
491 set of nodes.
491 set of nodes.
492
492
493 The number of revisions in the repo is taken into account to prevent
493 The number of revisions in the repo is taken into account to prevent
494 a numeric node prefix from conflicting with an integer revision number.
494 a numeric node prefix from conflicting with an integer revision number.
495 If we fail to do this, a value of e.g. ``10023`` could mean either
495 If we fail to do this, a value of e.g. ``10023`` could mean either
496 revision 10023 or node ``10023abc...``.
496 revision 10023 or node ``10023abc...``.
497 """
497 """
498 if not revs:
498 if not revs:
499 return minlen
499 return minlen
500 cl = repo.changelog
500 cl = repo.changelog
501 return max(
501 return max(
502 len(scmutil.shortesthexnodeidprefix(repo, cl.node(r), minlen))
502 len(scmutil.shortesthexnodeidprefix(repo, cl.node(r), minlen))
503 for r in revs
503 for r in revs
504 )
504 )
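For instance, a sketch mirroring how ``showstack`` uses this function above (the revset chosen here is an arbitrary example):

    revs = repo.revs(b'not public()')
    nodelen = longestshortest(repo, revs)    # unambiguous prefix length
    tres = formatter.templateresources(ui, repo)
    tmpl = formatter.maketemplater(
        ui, b'{shortest(node, %d)}' % nodelen, resources=tres
    )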
505
505
506
506
507 # Adjust the docstring of the show command so it shows all registered views.
507 # Adjust the docstring of the show command so it shows all registered views.
508 # This is a bit hacky because it runs at the end of module load. When moved
508 # This is a bit hacky because it runs at the end of module load. When moved
509 # into core or when another extension wants to provide a view, we'll need
509 # into core or when another extension wants to provide a view, we'll need
510 # to do this more robustly.
510 # to do this more robustly.
511 # TODO make this more robust.
511 # TODO make this more robust.
512 def _updatedocstring():
512 def _updatedocstring():
513 longest = max(map(len, showview._table.keys()))
513 longest = max(map(len, showview._table.keys()))
514 entries = []
514 entries = []
515 for key in sorted(showview._table.keys()):
515 for key in sorted(showview._table.keys()):
516 entries.append(
516 entries.append(
517 r' %s %s'
517 r' %s %s'
518 % (
518 % (
519 pycompat.sysstr(key.ljust(longest)),
519 pycompat.sysstr(key.ljust(longest)),
520 showview._table[key]._origdoc,
520 showview._table[key]._origdoc,
521 )
521 )
522 )
522 )
523
523
524 cmdtable[b'show'][0].__doc__ = pycompat.sysstr(b'%s\n\n%s\n ') % (
524 cmdtable[b'show'][0].__doc__ = pycompat.sysstr(b'%s\n\n%s\n ') % (
525 cmdtable[b'show'][0].__doc__.rstrip(),
525 cmdtable[b'show'][0].__doc__.rstrip(),
526 pycompat.sysstr(b'\n\n').join(entries),
526 pycompat.sysstr(b'\n\n').join(entries),
527 )
527 )
528
528
529
529
530 _updatedocstring()
530 _updatedocstring()
@@ -1,218 +1,218 @@
1 # win32mbcs.py -- MBCS filename support for Mercurial
1 # win32mbcs.py -- MBCS filename support for Mercurial
2 #
2 #
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
4 #
4 #
5 # Version: 0.3
5 # Version: 0.3
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2 or any later version.
9 # GNU General Public License version 2 or any later version.
10 #
10 #
11
11
12 '''allow the use of MBCS paths with problematic encodings
12 '''allow the use of MBCS paths with problematic encodings
13
13
14 Some MBCS encodings are not good for some path operations (e.g.
14 Some MBCS encodings are not good for some path operations (e.g.
15 splitting paths, case conversion, etc.) on their encoded bytes. We call
15 splitting paths, case conversion, etc.) on their encoded bytes. We call
16 such an encoding (e.g. shift_jis or big5) a "problematic encoding".
16 such an encoding (e.g. shift_jis or big5) a "problematic encoding".
17 This extension can be used to fix the issue with those encodings by
17 This extension can be used to fix the issue with those encodings by
18 wrapping some functions to convert to Unicode strings before path
18 wrapping some functions to convert to Unicode strings before path
19 operations.
19 operations.
20
20
21 This extension is useful for:
21 This extension is useful for:
22
22
23 - Japanese Windows users using shift_jis encoding.
23 - Japanese Windows users using shift_jis encoding.
24 - Chinese Windows users using big5 encoding.
24 - Chinese Windows users using big5 encoding.
25 - All users who use a repository with one of the problematic encodings on
25 - All users who use a repository with one of the problematic encodings on
26 a case-insensitive file system.
26 a case-insensitive file system.
27
27
28 This extension is not needed for:
28 This extension is not needed for:
29
29
30 - Any user who uses only ASCII chars in paths.
30 - Any user who uses only ASCII chars in paths.
31 - Any user who does not use any of the problematic encodings.
31 - Any user who does not use any of the problematic encodings.
32
32
33 Note that there are some limitations on using this extension:
33 Note that there are some limitations on using this extension:
34
34
35 - You should use a single encoding in one repository.
35 - You should use a single encoding in one repository.
36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
37 - win32mbcs is not compatible with fixutf8 extension.
37 - win32mbcs is not compatible with fixutf8 extension.
38
38
39 By default, win32mbcs uses encoding.encoding decided by Mercurial.
39 By default, win32mbcs uses encoding.encoding decided by Mercurial.
40 You can specify the encoding by config option::
40 You can specify the encoding by config option::
41
41
42 [win32mbcs]
42 [win32mbcs]
43 encoding = sjis
43 encoding = sjis
44
44
45 This is useful for users who want to commit with UTF-8 log messages.
45 This is useful for users who want to commit with UTF-8 log messages.
46 '''
46 '''
47 from __future__ import absolute_import
47 from __future__ import absolute_import
48
48
49 import os
49 import os
50 import sys
50 import sys
51
51
52 from mercurial.i18n import _
52 from mercurial.i18n import _
53 from mercurial import (
53 from mercurial import (
54 encoding,
54 encoding,
55 error,
55 error,
56 pycompat,
56 pycompat,
57 registrar,
57 registrar,
58 )
58 )
59
59
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # be specifying the version(s) of Mercurial they are tested with, or
62 # be specifying the version(s) of Mercurial they are tested with, or
63 # leave the attribute unspecified.
63 # leave the attribute unspecified.
64 testedwith = b'ships-with-hg-core'
64 testedwith = b'ships-with-hg-core'
65
65
66 configtable = {}
66 configtable = {}
67 configitem = registrar.configitem(configtable)
67 configitem = registrar.configitem(configtable)
68
68
69 # Encoding.encoding may be updated by --encoding option.
69 # Encoding.encoding may be updated by --encoding option.
70 # Use a lambda to delay the resolution.
70 # Use a lambda to delay the resolution.
71 configitem(
71 configitem(
72 b'win32mbcs', b'encoding', default=lambda: encoding.encoding,
72 b'win32mbcs', b'encoding', default=lambda: encoding.encoding,
73 )
73 )
74
74
75 _encoding = None # see extsetup
75 _encoding = None # see extsetup
76
76
77
77
78 def decode(arg):
78 def decode(arg):
79 if isinstance(arg, str):
79 if isinstance(arg, str):
80 uarg = arg.decode(_encoding)
80 uarg = arg.decode(_encoding)
81 if arg == uarg.encode(_encoding):
81 if arg == uarg.encode(_encoding):
82 return uarg
82 return uarg
83 raise UnicodeError(b"Not local encoding")
83 raise UnicodeError(b"Not local encoding")
84 elif isinstance(arg, tuple):
84 elif isinstance(arg, tuple):
85 return tuple(map(decode, arg))
85 return tuple(map(decode, arg))
86 elif isinstance(arg, list):
86 elif isinstance(arg, list):
87 return map(decode, arg)
87 return map(decode, arg)
88 elif isinstance(arg, dict):
88 elif isinstance(arg, dict):
89 for k, v in arg.items():
89 for k, v in arg.items():
90 arg[k] = decode(v)
90 arg[k] = decode(v)
91 return arg
91 return arg
92
92
93
93
94 def encode(arg):
94 def encode(arg):
95 if isinstance(arg, pycompat.unicode):
95 if isinstance(arg, pycompat.unicode):
96 return arg.encode(_encoding)
96 return arg.encode(_encoding)
97 elif isinstance(arg, tuple):
97 elif isinstance(arg, tuple):
98 return tuple(map(encode, arg))
98 return tuple(map(encode, arg))
99 elif isinstance(arg, list):
99 elif isinstance(arg, list):
100 return map(encode, arg)
100 return map(encode, arg)
101 elif isinstance(arg, dict):
101 elif isinstance(arg, dict):
102 for k, v in arg.items():
102 for k, v in arg.items():
103 arg[k] = encode(v)
103 arg[k] = encode(v)
104 return arg
104 return arg
105
105
106
106
107 def appendsep(s):
107 def appendsep(s):
108 # ensure the path ends with os.sep, appending it if necessary.
108 # ensure the path ends with os.sep, appending it if necessary.
109 try:
109 try:
110 us = decode(s)
110 us = decode(s)
111 except UnicodeError:
111 except UnicodeError:
112 us = s
112 us = s
113 if us and us[-1] not in b':/\\':
113 if us and us[-1] not in b':/\\':
114 s += pycompat.ossep
114 s += pycompat.ossep
115 return s
115 return s
116
116
117
117
118 def basewrapper(func, argtype, enc, dec, args, kwds):
118 def basewrapper(func, argtype, enc, dec, args, kwds):
119 # check if already converted, then call the original
119 # check if already converted, then call the original
120 for arg in args:
120 for arg in args:
121 if isinstance(arg, argtype):
121 if isinstance(arg, argtype):
122 return func(*args, **kwds)
122 return func(*args, **kwds)
123
123
124 try:
124 try:
125 # convert string arguments, call func, then convert back the
125 # convert string arguments, call func, then convert back the
126 # return value.
126 # return value.
127 return enc(func(*dec(args), **dec(kwds)))
127 return enc(func(*dec(args), **dec(kwds)))
128 except UnicodeError:
128 except UnicodeError:
129 raise error.Abort(
129 raise error.Abort(
130 _(b"[win32mbcs] filename conversion failed with" b" %s encoding\n")
130 _(b"[win32mbcs] filename conversion failed with" b" %s encoding\n")
131 % _encoding
131 % _encoding
132 )
132 )
133
133
134
134
135 def wrapper(func, args, kwds):
135 def wrapper(func, args, kwds):
136 return basewrapper(func, pycompat.unicode, encode, decode, args, kwds)
136 return basewrapper(func, pycompat.unicode, encode, decode, args, kwds)
137
137
138
138
139 def reversewrapper(func, args, kwds):
139 def reversewrapper(func, args, kwds):
140 return basewrapper(func, str, decode, encode, args, kwds)
140 return basewrapper(func, str, decode, encode, args, kwds)
141
141
142
142
143 def wrapperforlistdir(func, args, kwds):
143 def wrapperforlistdir(func, args, kwds):
144 # Ensure the 'path' argument ends with os.sep, to avoid
144 # Ensure the 'path' argument ends with os.sep, to avoid
145 # misinterpreting a trailing 0x5c of an MBCS 2nd byte as a path separator.
145 # misinterpreting a trailing 0x5c of an MBCS 2nd byte as a path separator.
146 if args:
146 if args:
147 args = list(args)
147 args = list(args)
148 args[0] = appendsep(args[0])
148 args[0] = appendsep(args[0])
149 if b'path' in kwds:
149 if b'path' in kwds:
150 kwds[b'path'] = appendsep(kwds[b'path'])
150 kwds[b'path'] = appendsep(kwds[b'path'])
151 return func(*args, **kwds)
151 return func(*args, **kwds)
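To see why the trailing separator matters, consider the classic 0x5c case (a standalone sketch; the drive path mentioned in the comment is hypothetical):

    # In shift_jis, KATAKANA LETTER SO (U+30BD) encodes to 0x83 0x5c, and
    # 0x5c is also the byte value of the Windows path separator '\'.
    so = u'\u30bd'.encode('shift_jis')       # b'\x83\\'
    assert so[-1:] == b'\\'
    # A byte-oriented listdir on a path such as b'C:\\repo\\' + so would
    # misread the second byte of the character as a separator; appendsep()
    # sidesteps this by guaranteeing the path ends with a real separator.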
152
152
153
153
154 def wrapname(name, wrapper):
154 def wrapname(name, wrapper):
155 module, name = name.rsplit(b'.', 1)
155 module, name = name.rsplit(b'.', 1)
156 module = sys.modules[module]
156 module = sys.modules[module]
157 func = getattr(module, name)
157 func = getattr(module, name)
158
158
159 def f(*args, **kwds):
159 def f(*args, **kwds):
160 return wrapper(func, args, kwds)
160 return wrapper(func, args, kwds)
161
161
162 f.__name__ = func.__name__
162 f.__name__ = func.__name__
163 setattr(module, name, f)
163 setattr(module, name, f)
164
164
165
165
166 # List of functions to be wrapped.
166 # List of functions to be wrapped.
167 # NOTE: os.path.dirname() and os.path.basename() are safe because
167 # NOTE: os.path.dirname() and os.path.basename() are safe because
168 # they use result of os.path.split()
168 # they use result of os.path.split()
169 funcs = b'''os.path.join os.path.split os.path.splitext
169 funcs = b'''os.path.join os.path.split os.path.splitext
170 os.path.normpath os.makedirs mercurial.util.endswithsep
170 os.path.normpath os.makedirs mercurial.util.endswithsep
171 mercurial.util.splitpath mercurial.util.fscasesensitive
171 mercurial.util.splitpath mercurial.util.fscasesensitive
172 mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
172 mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
173 mercurial.util.checkwinfilename mercurial.util.checkosfilename
173 mercurial.util.checkwinfilename mercurial.util.checkosfilename
174 mercurial.util.split'''
174 mercurial.util.split'''
175
175
176 # These functions must be called with locally encoded strings
176 # These functions must be called with locally encoded strings
177 # because they expect locally encoded arguments and misbehave
177 # because they expect locally encoded arguments and misbehave
178 # when given unicode strings.
178 # when given unicode strings.
179 rfuncs = b'''mercurial.encoding.upper mercurial.encoding.lower
179 rfuncs = b'''mercurial.encoding.upper mercurial.encoding.lower
180 mercurial.util._filenamebytestr'''
180 mercurial.util._filenamebytestr'''
181
181
182 # List of Windows specific functions to be wrapped.
182 # List of Windows specific functions to be wrapped.
183 winfuncs = b'''os.path.splitunc'''
183 winfuncs = b'''os.path.splitunc'''
184
184
185 # codec and alias names of sjis and big5 to be faked.
185 # codec and alias names of sjis and big5 to be faked.
186 problematic_encodings = b'''big5 big5-tw csbig5 big5hkscs big5-hkscs
186 problematic_encodings = b'''big5 big5-tw csbig5 big5hkscs big5-hkscs
187 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
187 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
188 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
188 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
189 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
189 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
190
190
191
191
192 def extsetup(ui):
192 def extsetup(ui):
193 # TODO: decide use of config section for this extension
193 # TODO: decide use of config section for this extension
194 if (not os.path.supports_unicode_filenames) and (
194 if (not os.path.supports_unicode_filenames) and (
195 pycompat.sysplatform != b'cygwin'
195 pycompat.sysplatform != b'cygwin'
196 ):
196 ):
197 ui.warn(_(b"[win32mbcs] cannot activate on this platform.\n"))
197 ui.warn(_(b"[win32mbcs] cannot activate on this platform.\n"))
198 return
198 return
199 # determine encoding for filename
199 # determine encoding for filename
200 global _encoding
200 global _encoding
201 _encoding = ui.config(b'win32mbcs', b'encoding')
201 _encoding = ui.config(b'win32mbcs', b'encoding')
202 # fake is only for relevant environment.
202 # fake is only for relevant environment.
203 if _encoding.lower() in problematic_encodings.split():
203 if _encoding.lower() in problematic_encodings.split():
204 for f in funcs.split():
204 for f in funcs.split():
205 wrapname(f, wrapper)
205 wrapname(f, wrapper)
206 if pycompat.iswindows:
206 if pycompat.iswindows:
207 for f in winfuncs.split():
207 for f in winfuncs.split():
208 wrapname(f, wrapper)
208 wrapname(f, wrapper)
209 wrapname(b"mercurial.util.listdir", wrapperforlistdir)
209 wrapname(b"mercurial.util.listdir", wrapperforlistdir)
210 wrapname(b"mercurial.windows.listdir", wrapperforlistdir)
210 wrapname(b"mercurial.windows.listdir", wrapperforlistdir)
211 # wrap functions to be called with local byte string arguments
211 # wrap functions to be called with local byte string arguments
212 for f in rfuncs.split():
212 for f in rfuncs.split():
213 wrapname(f, reversewrapper)
213 wrapname(f, reversewrapper)
214 # Check sys.argv manually instead of using ui.debug() because
214 # Check sys.argv manually instead of using ui.debug() because
215 # command line options are not yet applied when
215 # command line options are not yet applied when
216 # extensions.loadall() is called.
216 # extensions.loadall() is called.
217 if b'--debug' in sys.argv:
217 if b'--debug' in sys.argv:
218 ui.write(b"[win32mbcs] activated with encoding: %s\n" % _encoding)
218 ui.writenoi18n(b"[win32mbcs] activated with encoding: %s\n" % _encoding)
@@ -1,2555 +1,2555 @@
1 # bundle2.py - generic container format to transmit arbitrary data.
1 # bundle2.py - generic container format to transmit arbitrary data.
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Handling of the new bundle2 format
7 """Handling of the new bundle2 format
8
8
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
10 payloads in an application-agnostic way. It consists of a sequence of "parts"
10 payloads in an application-agnostic way. It consists of a sequence of "parts"
11 that will be handed to and processed by the application layer.
11 that will be handed to and processed by the application layer.
12
12
13
13
14 General format architecture
14 General format architecture
15 ===========================
15 ===========================
16
16
17 The format is structured as follows
17 The format is structured as follows
18
18
19 - magic string
19 - magic string
20 - stream level parameters
20 - stream level parameters
21 - payload parts (any number)
21 - payload parts (any number)
22 - end of stream marker.
22 - end of stream marker.
23
23
24 The binary format
24 The binary format
25 ============================
25 ============================
26
26
27 All numbers are unsigned and big-endian.
27 All numbers are unsigned and big-endian.
28
28
29 stream level parameters
29 stream level parameters
30 ------------------------
30 ------------------------
31
31
32 The binary format is as follows
32 The binary format is as follows
33
33
34 :params size: int32
34 :params size: int32
35
35
36 The total number of Bytes used by the parameters
36 The total number of Bytes used by the parameters
37
37
38 :params value: arbitrary number of Bytes
38 :params value: arbitrary number of Bytes
39
39
40 A blob of `params size` containing the serialized version of all stream level
40 A blob of `params size` containing the serialized version of all stream level
41 parameters.
41 parameters.
42
42
43 The blob contains a space-separated list of parameters. Parameters with a value
43 The blob contains a space-separated list of parameters. Parameters with a value
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45
45
46 Empty names are obviously forbidden.
46 Empty names are obviously forbidden.
47
47
48 Name MUST start with a letter. If this first letter is lower case, the
48 Name MUST start with a letter. If this first letter is lower case, the
49 parameter is advisory and can be safely ignored. However, when the first
49 parameter is advisory and can be safely ignored. However, when the first
50 letter is capitalized, the parameter is mandatory and the bundling process
50 letter is capitalized, the parameter is mandatory and the bundling process
51 MUST stop if it is not able to process it.
51 MUST stop if it is not able to process it.
52
52
53 Stream parameters use a simple textual format for two main reasons:
53 Stream parameters use a simple textual format for two main reasons:
54
54
55 - Stream level parameters should remain simple and we want to discourage any
55 - Stream level parameters should remain simple and we want to discourage any
56 crazy usage.
56 crazy usage.
57 - Textual data allow easy human inspection of a bundle2 header in case of
57 - Textual data allow easy human inspection of a bundle2 header in case of
58 trouble.
58 trouble.
59
59
60 Any application-level options MUST go into a bundle2 part instead.
60 Any application-level options MUST go into a bundle2 part instead.
61
61
62 Payload part
62 Payload part
63 ------------------------
63 ------------------------
64
64
65 The binary format is as follows
65 The binary format is as follows
66
66
67 :header size: int32
67 :header size: int32
68
68
69 The total number of Bytes used by the part header. When the header is empty
69 The total number of Bytes used by the part header. When the header is empty
70 (size = 0) this is interpreted as the end of stream marker.
70 (size = 0) this is interpreted as the end of stream marker.
71
71
72 :header:
72 :header:
73
73
74 The header defines how to interpret the part. It contains two pieces of
74 The header defines how to interpret the part. It contains two pieces of
75 data: the part type, and the part parameters.
75 data: the part type, and the part parameters.
76
76
77 The part type is used to route to an application level handler that can
77 The part type is used to route to an application level handler that can
78 interpret the payload.
78 interpret the payload.
79
79
80 Part parameters are passed to the application level handler. They are
80 Part parameters are passed to the application level handler. They are
81 meant to convey information that will help the application level object to
81 meant to convey information that will help the application level object to
82 interpret the part payload.
82 interpret the part payload.
83
83
84 The binary format of the header is as follows
84 The binary format of the header is as follows
85
85
86 :typesize: (one byte)
86 :typesize: (one byte)
87
87
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
89
89
90 :partid: A 32-bit integer (unique in the bundle) that can be used to refer
90 :partid: A 32-bit integer (unique in the bundle) that can be used to refer
91 to this part.
91 to this part.
92
92
93 :parameters:
93 :parameters:
94
94
95 A part's parameters may have arbitrary content; the binary structure is::
95 A part's parameters may have arbitrary content; the binary structure is::
96
96
97 <mandatory-count><advisory-count><param-sizes><param-data>
97 <mandatory-count><advisory-count><param-sizes><param-data>
98
98
99 :mandatory-count: 1 byte, number of mandatory parameters
99 :mandatory-count: 1 byte, number of mandatory parameters
100
100
101 :advisory-count: 1 byte, number of advisory parameters
101 :advisory-count: 1 byte, number of advisory parameters
102
102
103 :param-sizes:
103 :param-sizes:
104
104
105 N couples of bytes, where N is the total number of parameters. Each
105 N couples of bytes, where N is the total number of parameters. Each
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
107
107
108 :param-data:
108 :param-data:
109
109
110 A blob of bytes from which each parameter key and value can be
110 A blob of bytes from which each parameter key and value can be
111 retrieved using the list of size couples stored in the previous
111 retrieved using the list of size couples stored in the previous
112 field.
112 field.
113
113
114 Mandatory parameters comes first, then the advisory ones.
114 Mandatory parameters comes first, then the advisory ones.
115
115
116 Each parameter's key MUST be unique within the part.
116 Each parameter's key MUST be unique within the part.
117
117
118 :payload:
118 :payload:
119
119
120 payload is a series of `<chunksize><chunkdata>`.
120 payload is a series of `<chunksize><chunkdata>`.
121
121
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
123 `chunksize` says)` The payload part is concluded by a zero size chunk.
123 `chunksize` says)` The payload part is concluded by a zero size chunk.
124
124
125 The current implementation always produces either zero or one chunk.
125 The current implementation always produces either zero or one chunk.
126 This is an implementation limitation that will ultimately be lifted.
126 This is an implementation limitation that will ultimately be lifted.
127
127
128 `chunksize` can be negative to trigger special case processing. No such
128 `chunksize` can be negative to trigger special case processing. No such
129 processing is in place yet.
129 processing is in place yet.
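
    As a rough sketch only (not code from this module), the chunk framing
    described above could be consumed like this, assuming ``fp`` is a binary
    stream positioned at the start of a payload::

        import struct

        def readchunks(fp):
            # each chunk is <int32 big-endian size><size bytes of data>;
            # a zero size concludes the payload
            while True:
                size = struct.unpack('>i', fp.read(4))[0]
                if size == 0:
                    return
                # negative sizes are reserved for special case processing
                assert size > 0
                yield fp.read(size)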

Bundle processing
=================

Each part is processed in order using a "part handler". Handlers are
registered for a certain part type.

The matching of a part to its handler is case insensitive. The case of the
part type is used to know if a part is mandatory or advisory. If the part
type contains any uppercase character it is considered mandatory. When no
handler is known for a mandatory part, the process is aborted and an
exception is raised. If the part is advisory and no handler is known, the
part is ignored. When the process is aborted, the full bundle is still read
from the stream to keep the channel usable. But none of the parts read after
an abort are processed. In the future, dropping the stream may become an
option for channels we do not care to preserve.
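
As a minimal sketch of that case rule (hypothetical part types; not
necessarily the exact check performed when a part header is parsed)::

    def ismandatory(parttype):
        # any uppercase character marks the part as mandatory
        return parttype != parttype.lower()

    ismandatory(b'output')       # False: advisory, safe to ignore
    ismandatory(b'CHANGEGROUP')  # True: no known handler aborts the bundle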
146 """
146 """
147
147
148 from __future__ import absolute_import, division
148 from __future__ import absolute_import, division
149
149
150 import collections
150 import collections
151 import errno
151 import errno
152 import os
152 import os
153 import re
153 import re
154 import string
154 import string
155 import struct
155 import struct
156 import sys
156 import sys
157
157
158 from .i18n import _
158 from .i18n import _
159 from . import (
159 from . import (
160 bookmarks,
160 bookmarks,
161 changegroup,
161 changegroup,
162 encoding,
162 encoding,
163 error,
163 error,
164 node as nodemod,
164 node as nodemod,
165 obsolete,
165 obsolete,
166 phases,
166 phases,
167 pushkey,
167 pushkey,
168 pycompat,
168 pycompat,
169 streamclone,
169 streamclone,
170 tags,
170 tags,
171 url,
171 url,
172 util,
172 util,
173 )
173 )
174 from .utils import stringutil
174 from .utils import stringutil
175
175
176 urlerr = util.urlerr
176 urlerr = util.urlerr
177 urlreq = util.urlreq
177 urlreq = util.urlreq
178
178
179 _pack = struct.pack
179 _pack = struct.pack
180 _unpack = struct.unpack
180 _unpack = struct.unpack
181
181
182 _fstreamparamsize = b'>i'
182 _fstreamparamsize = b'>i'
183 _fpartheadersize = b'>i'
183 _fpartheadersize = b'>i'
184 _fparttypesize = b'>B'
184 _fparttypesize = b'>B'
185 _fpartid = b'>I'
185 _fpartid = b'>I'
186 _fpayloadsize = b'>i'
186 _fpayloadsize = b'>i'
187 _fpartparamcount = b'>BB'
187 _fpartparamcount = b'>BB'
188
188
189 preferedchunksize = 32768
189 preferedchunksize = 32768
190
190
191 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
191 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')


def outdebug(ui, message):
    """debug regarding output stream (bundling)"""
    if ui.configbool(b'devel', b'bundle2.debug'):
        ui.debug(b'bundle2-output: %s\n' % message)


def indebug(ui, message):
    """debug on input stream (unbundling)"""
    if ui.configbool(b'devel', b'bundle2.debug'):
        ui.debug(b'bundle2-input: %s\n' % message)


def validateparttype(parttype):
    """raise ValueError if a parttype contains an invalid character"""
    if _parttypeforbidden.search(parttype):
        raise ValueError(parttype)


def _makefpartparamsizes(nbparams):
    """return a struct format to read part parameter sizes

    The number of parameters is variable so we need to build that format
    dynamically.
    """
    return b'>' + (b'BB' * nbparams)
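
# A rough illustration (hypothetical byte values, not code used by this
# module): with two parameters whose keys are 3 and 2 bytes long and whose
# values are 5 and 4 bytes long, the size block decodes as:
#
#   struct.unpack(_makefpartparamsizes(2), b'\x03\x05\x02\x04')
#   # -> (3, 5, 2, 4)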


parthandlermapping = {}


def parthandler(parttype, params=()):
    """decorator that registers a function as a bundle2 part handler

    eg::

        @parthandler('myparttype', ('mandatory', 'param', 'handled'))
        def myparttypehandler(...):
            '''process a part of type "my part".'''
            ...
    """
    validateparttype(parttype)

    def _decorator(func):
        lparttype = parttype.lower()  # enforce lower case matching.
        assert lparttype not in parthandlermapping
        parthandlermapping[lparttype] = func
        func.params = frozenset(params)
        return func

    return _decorator


class unbundlerecords(object):
    """keep record of what happens during an unbundle

    New records are added using `records.add('cat', obj)`, where 'cat' is a
    category of record and obj is an arbitrary object.

    `records['cat']` will return all entries of the category 'cat'.

    Iterating on the object itself will yield `('category', obj)` tuples
    for all entries.

    All iteration happens in chronological order.
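
    A minimal usage sketch (hypothetical category and value)::

        records = unbundlerecords()
        records.add('changegroup', {'return': 1})
        records['changegroup']  # -> ({'return': 1},)
        list(records)           # -> [('changegroup', {'return': 1})]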
258 """
258 """
259
259
260 def __init__(self):
260 def __init__(self):
261 self._categories = {}
261 self._categories = {}
262 self._sequences = []
262 self._sequences = []
263 self._replies = {}
263 self._replies = {}
264
264
265 def add(self, category, entry, inreplyto=None):
265 def add(self, category, entry, inreplyto=None):
266 """add a new record of a given category.
266 """add a new record of a given category.
267
267
268 The entry can then be retrieved in the list returned by
268 The entry can then be retrieved in the list returned by
269 self['category']."""
269 self['category']."""
270 self._categories.setdefault(category, []).append(entry)
270 self._categories.setdefault(category, []).append(entry)
271 self._sequences.append((category, entry))
271 self._sequences.append((category, entry))
272 if inreplyto is not None:
272 if inreplyto is not None:
273 self.getreplies(inreplyto).add(category, entry)
273 self.getreplies(inreplyto).add(category, entry)
274
274
275 def getreplies(self, partid):
275 def getreplies(self, partid):
276 """get the records that are replies to a specific part"""
276 """get the records that are replies to a specific part"""
277 return self._replies.setdefault(partid, unbundlerecords())
277 return self._replies.setdefault(partid, unbundlerecords())
278
278
279 def __getitem__(self, cat):
279 def __getitem__(self, cat):
280 return tuple(self._categories.get(cat, ()))
280 return tuple(self._categories.get(cat, ()))
281
281
282 def __iter__(self):
282 def __iter__(self):
283 return iter(self._sequences)
283 return iter(self._sequences)
284
284
285 def __len__(self):
285 def __len__(self):
286 return len(self._sequences)
286 return len(self._sequences)
287
287
288 def __nonzero__(self):
288 def __nonzero__(self):
289 return bool(self._sequences)
289 return bool(self._sequences)
290
290
291 __bool__ = __nonzero__
291 __bool__ = __nonzero__


class bundleoperation(object):
    """an object that represents a single bundling process

    Its purpose is to carry unbundle-related objects and states.

    A new object should be created at the beginning of each bundle processing.
    The object is to be returned by the processing function.

    The object has very little content now; it will ultimately contain:
    * an access to the repo the bundle is applied to,
    * a ui object,
    * a way to retrieve a transaction to add changes to the repo,
    * a way to record the result of processing each part,
    * a way to construct a bundle response when applicable.
    """

    def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
        self.repo = repo
        self.ui = repo.ui
        self.records = unbundlerecords()
        self.reply = None
        self.captureoutput = captureoutput
        self.hookargs = {}
        self._gettransaction = transactiongetter
        # carries values that can modify part behavior
        self.modes = {}
        self.source = source

    def gettransaction(self):
        transaction = self._gettransaction()

        if self.hookargs:
            # the ones added to the transaction supersede those added
            # to the operation.
            self.hookargs.update(transaction.hookargs)
            transaction.hookargs = self.hookargs

        # mark the hookargs as flushed. further attempts to add to
        # hookargs will result in an abort.
        self.hookargs = None

        return transaction

    def addhookargs(self, hookargs):
        if self.hookargs is None:
            raise error.ProgrammingError(
                b'attempted to add hookargs to '
                b'operation after transaction started'
            )
        self.hookargs.update(hookargs)
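
    # Hypothetical illustration of the hookargs life cycle described above:
    #
    #   op.addhookargs({b'source': b'push'})  # fine before the transaction
    #   tr = op.gettransaction()              # flushes hookargs into tr
    #   op.addhookargs({b'url': b'...'})      # now raises ProgrammingError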


class TransactionUnavailable(RuntimeError):
    pass


def _notransaction():
    """default method to get a transaction while processing a bundle

    Raise an exception to highlight the fact that no transaction was expected
    to be created"""
    raise TransactionUnavailable()


def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
    # transform me into unbundler.apply() as soon as the freeze is lifted
    if isinstance(unbundler, unbundle20):
        tr.hookargs[b'bundle2'] = b'1'
        if source is not None and b'source' not in tr.hookargs:
            tr.hookargs[b'source'] = source
        if url is not None and b'url' not in tr.hookargs:
            tr.hookargs[b'url'] = url
        return processbundle(repo, unbundler, lambda: tr, source=source)
    else:
        # the transactiongetter won't be used, but we might as well set it
        op = bundleoperation(repo, lambda: tr, source=source)
        _processchangegroup(op, unbundler, tr, source, url, **kwargs)
        return op


class partiterator(object):
    def __init__(self, repo, op, unbundler):
        self.repo = repo
        self.op = op
        self.unbundler = unbundler
        self.iterator = None
        self.count = 0
        self.current = None

    def __enter__(self):
        def func():
            itr = enumerate(self.unbundler.iterparts(), 1)
            for count, p in itr:
                self.count = count
                self.current = p
                yield p
                p.consume()
                self.current = None

        self.iterator = func()
        return self.iterator

    def __exit__(self, type, exc, tb):
        if not self.iterator:
            return

        # Only gracefully abort in a normal exception situation. User aborts
        # like Ctrl+C throw a KeyboardInterrupt which is not a subclass of
        # Exception, and should not trigger a graceful cleanup.
        if isinstance(exc, Exception):
            # Any exceptions seeking to the end of the bundle at this point are
            # almost certainly related to the underlying stream being bad.
            # And, chances are that the exception we're handling is related to
            # getting in that bad state. So, we swallow the seeking error and
            # re-raise the original error.
            seekerror = False
            try:
                if self.current:
                    # consume the part content to not corrupt the stream.
                    self.current.consume()

                for part in self.iterator:
                    # consume the bundle content
                    part.consume()
            except Exception:
                seekerror = True

            # Small hack to let caller code distinguish exceptions from bundle2
            # processing from processing the old format. This is mostly needed
            # to handle different return codes to unbundle according to the
            # type of bundle. We should probably clean up or drop this return
            # code craziness in a future version.
            exc.duringunbundle2 = True
            salvaged = []
            replycaps = None
            if self.op.reply is not None:
                salvaged = self.op.reply.salvageoutput()
                replycaps = self.op.reply.capabilities
            exc._replycaps = replycaps
            exc._bundle2salvagedoutput = salvaged

            # Re-raising from a variable loses the original stack. So only use
            # that form if we need to.
            if seekerror:
                raise exc

        self.repo.ui.debug(
            b'bundle2-input-bundle: %i parts total\n' % self.count
        )


def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
    """This function processes a bundle, applying its effects to/from a repo

    It iterates over each part then searches for and uses the proper handling
    code to process the part. Parts are processed in order.

    An unknown mandatory part will abort the process.

    It is temporarily possible to provide a prebuilt bundleoperation to the
    function. This is used to ensure output is properly propagated in case of
    an error during the unbundling. This output capturing part will likely be
    reworked and this ability will probably go away in the process.
    """
    if op is None:
        if transactiongetter is None:
            transactiongetter = _notransaction
        op = bundleoperation(repo, transactiongetter, source=source)
    # todo:
    # - replace this with an init function soon.
    # - exception catching
    unbundler.params
    if repo.ui.debugflag:
        msg = [b'bundle2-input-bundle:']
        if unbundler.params:
            msg.append(b' %i params' % len(unbundler.params))
        if op._gettransaction is None or op._gettransaction is _notransaction:
            msg.append(b' no-transaction')
        else:
            msg.append(b' with-transaction')
        msg.append(b'\n')
        repo.ui.debug(b''.join(msg))

    processparts(repo, op, unbundler)

    return op
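
# A hypothetical call, assuming `repo` and an unbundler obtained from
# getunbundler(); _notransaction is used when no write is expected:
#
#   op = processbundle(repo, unbundler, transactiongetter=_notransaction)
#   op.records[b'changegroup']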


def processparts(repo, op, unbundler):
    with partiterator(repo, op, unbundler) as parts:
        for part in parts:
            _processpart(op, part)


def _processchangegroup(op, cg, tr, source, url, **kwargs):
    ret = cg.apply(op.repo, tr, source, url, **kwargs)
    op.records.add(b'changegroup', {b'return': ret,})
    return ret


def _gethandler(op, part):
    status = b'unknown'  # used by debug output
    try:
        handler = parthandlermapping.get(part.type)
        if handler is None:
            status = b'unsupported-type'
            raise error.BundleUnknownFeatureError(parttype=part.type)
        indebug(op.ui, b'found a handler for part %s' % part.type)
        unknownparams = part.mandatorykeys - handler.params
        if unknownparams:
            unknownparams = list(unknownparams)
            unknownparams.sort()
            status = b'unsupported-params (%s)' % b', '.join(unknownparams)
            raise error.BundleUnknownFeatureError(
                parttype=part.type, params=unknownparams
            )
        status = b'supported'
    except error.BundleUnknownFeatureError as exc:
        if part.mandatory:  # mandatory parts
            raise
        indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
        return  # skip to part processing
    finally:
        if op.ui.debugflag:
            msg = [b'bundle2-input-part: "%s"' % part.type]
            if not part.mandatory:
                msg.append(b' (advisory)')
            nbmp = len(part.mandatorykeys)
            nbap = len(part.params) - nbmp
            if nbmp or nbap:
                msg.append(b' (params:')
                if nbmp:
                    msg.append(b' %i mandatory' % nbmp)
                if nbap:
                    msg.append(b' %i advisory' % nbap)
                msg.append(b')')
            msg.append(b' %s\n' % status)
            op.ui.debug(b''.join(msg))

    return handler


def _processpart(op, part):
    """process a single part from a bundle

    The part is guaranteed to have been fully consumed when the function exits
    (even if an exception is raised)."""
    handler = _gethandler(op, part)
    if handler is None:
        return

    # The handler is called outside the try block in _gethandler so that we
    # don't risk catching KeyErrors from anything other than the
    # parthandlermapping lookup (any KeyError raised by handler()
    # itself represents a defect of a different variety).
    output = None
    if op.captureoutput and op.reply is not None:
        op.ui.pushbuffer(error=True, subproc=True)
        output = b''
    try:
        handler(op, part)
    finally:
        if output is not None:
            output = op.ui.popbuffer()
        if output:
            outpart = op.reply.newpart(b'output', data=output, mandatory=False)
            outpart.addparam(
                b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
            )
562 )
562 )
563
563
564
564
565 def decodecaps(blob):
565 def decodecaps(blob):
566 """decode a bundle2 caps bytes blob into a dictionary
566 """decode a bundle2 caps bytes blob into a dictionary
567
567
568 The blob is a list of capabilities (one per line)
568 The blob is a list of capabilities (one per line)
569 Capabilities may have values using a line of the form::
569 Capabilities may have values using a line of the form::
570
570
571 capability=value1,value2,value3
571 capability=value1,value2,value3
572
572
573 The values are always a list."""
573 The values are always a list."""
574 caps = {}
574 caps = {}
575 for line in blob.splitlines():
575 for line in blob.splitlines():
576 if not line:
576 if not line:
577 continue
577 continue
578 if b'=' not in line:
578 if b'=' not in line:
579 key, vals = line, ()
579 key, vals = line, ()
580 else:
580 else:
581 key, vals = line.split(b'=', 1)
581 key, vals = line.split(b'=', 1)
582 vals = vals.split(b',')
582 vals = vals.split(b',')
583 key = urlreq.unquote(key)
583 key = urlreq.unquote(key)
584 vals = [urlreq.unquote(v) for v in vals]
584 vals = [urlreq.unquote(v) for v in vals]
585 caps[key] = vals
585 caps[key] = vals
586 return caps
586 return caps
587
587
588
588
589 def encodecaps(caps):
589 def encodecaps(caps):
590 """encode a bundle2 caps dictionary into a bytes blob"""
590 """encode a bundle2 caps dictionary into a bytes blob"""
591 chunks = []
591 chunks = []
592 for ca in sorted(caps):
592 for ca in sorted(caps):
593 vals = caps[ca]
593 vals = caps[ca]
594 ca = urlreq.quote(ca)
594 ca = urlreq.quote(ca)
595 vals = [urlreq.quote(v) for v in vals]
595 vals = [urlreq.quote(v) for v in vals]
596 if vals:
596 if vals:
597 ca = b"%s=%s" % (ca, b','.join(vals))
597 ca = b"%s=%s" % (ca, b','.join(vals))
598 chunks.append(ca)
598 chunks.append(ca)
599 return b'\n'.join(chunks)
599 return b'\n'.join(chunks)
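
# A round-trip sketch with a hypothetical capability set:
#
#   blob = encodecaps({b'HG20': [], b'changegroup': [b'01', b'02']})
#   blob == b'HG20\nchangegroup=01,02'
#   decodecaps(blob) == {b'HG20': [], b'changegroup': [b'01', b'02']}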


bundletypes = {
    b"": (b"", b'UN'),  # only when using unbundle on ssh and old http servers
    # since the unification ssh accepts a header but there
    # is no capability signaling it.
    b"HG20": (),  # special-cased below
    b"HG10UN": (b"HG10UN", b'UN'),
    b"HG10BZ": (b"HG10", b'BZ'),
    b"HG10GZ": (b"HG10GZ", b'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']


class bundle20(object):
    """represent an outgoing bundle2 container

    Use the `addparam` method to add stream level parameters, and `newpart`
    to populate it. Then call `getchunks` to retrieve all the binary chunks
    of data that compose the bundle2 container."""

    _magicstring = b'HG20'

    def __init__(self, ui, capabilities=()):
        self.ui = ui
        self._params = []
        self._parts = []
        self.capabilities = dict(capabilities)
        self._compengine = util.compengines.forbundletype(b'UN')
        self._compopts = None
        # If compression is being handled by a consumer of the raw
        # data (e.g. the wire protocol), unsetting this flag tells
        # consumers that the bundle is best left uncompressed.
        self.prefercompressed = True

    def setcompression(self, alg, compopts=None):
        """setup core part compression to <alg>"""
        if alg in (None, b'UN'):
            return
        assert not any(n.lower() == b'compression' for n, v in self._params)
        self.addparam(b'Compression', alg)
        self._compengine = util.compengines.forbundletype(alg)
        self._compopts = compopts

    @property
    def nbparts(self):
        """total number of parts added to the bundler"""
        return len(self._parts)

    # methods used to define the bundle2 content
    def addparam(self, name, value=None):
        """add a stream level parameter"""
        if not name:
            raise error.ProgrammingError(b'empty parameter name')
        if name[0:1] not in pycompat.bytestr(string.ascii_letters):
            raise error.ProgrammingError(
                b'non letter first character: %s' % name
            )
        self._params.append((name, value))

    def addpart(self, part):
        """add a new part to the bundle2 container

        Parts contain the actual applicative payload."""
        assert part.id is None
        part.id = len(self._parts)  # very cheap counter
        self._parts.append(part)

    def newpart(self, typeid, *args, **kwargs):
        """create a new part and add it to the container

        The part is directly added to the container. For now, this means
        that any failure to properly initialize the part after calling
        ``newpart`` should result in a failure of the whole bundling process.

        You can still fall back to manually creating and adding a part if you
        need better control."""
        part = bundlepart(typeid, *args, **kwargs)
        self.addpart(part)
        return part

    # methods used to generate the bundle2 stream
    def getchunks(self):
        if self.ui.debugflag:
            msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
            if self._params:
                msg.append(b' (%i params)' % len(self._params))
            msg.append(b' %i parts total\n' % len(self._parts))
            self.ui.debug(b''.join(msg))
        outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
        yield self._magicstring
        param = self._paramchunk()
        outdebug(self.ui, b'bundle parameter: %s' % param)
        yield _pack(_fstreamparamsize, len(param))
        if param:
            yield param
        for chunk in self._compengine.compressstream(
            self._getcorechunk(), self._compopts
        ):
            yield chunk

    def _paramchunk(self):
        """return an encoded version of all stream parameters"""
        blocks = []
        for par, value in self._params:
            par = urlreq.quote(par)
            if value is not None:
                value = urlreq.quote(value)
                par = b'%s=%s' % (par, value)
            blocks.append(par)
        return b' '.join(blocks)
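
    # Hypothetical illustration of the encoding performed above:
    #
    #   [(b'Compression', b'BZ'), (b'anadvisoryparam', None)]
    #   -> b'Compression=BZ anadvisoryparam'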

    def _getcorechunk(self):
        """yield chunks for the core part of the bundle

        (all but headers and parameters)"""
        outdebug(self.ui, b'start of parts')
        for part in self._parts:
            outdebug(self.ui, b'bundle part: "%s"' % part.type)
            for chunk in part.getchunks(ui=self.ui):
                yield chunk
        outdebug(self.ui, b'end of bundle')
        yield _pack(_fpartheadersize, 0)

    def salvageoutput(self):
        """return a list with a copy of all output parts in the bundle

        This is meant to be used during error handling to make sure we
        preserve server output"""
        salvaged = []
        for part in self._parts:
            if part.type.startswith(b'output'):
                salvaged.append(part.copy())
        return salvaged
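

# A minimal bundling sketch (assuming a configured `ui` object; hypothetical
# part type and data):
#
#   bundler = bundle20(ui)
#   bundler.newpart(b'output', data=b'hello')
#   raw = b''.join(bundler.getchunks())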


class unpackermixin(object):
    """A mixin to extract bytes and struct data from a stream"""

    def __init__(self, fp):
        self._fp = fp

    def _unpack(self, format):
        """unpack this struct format from the stream

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low level stream, including bundle2 level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        data = self._readexact(struct.calcsize(format))
        return _unpack(format, data)

    def _readexact(self, size):
        """read exactly <size> bytes from the stream

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low level stream, including bundle2 level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        return changegroup.readexactly(self._fp, size)


def getunbundler(ui, fp, magicstring=None):
    """return a valid unbundler object for a given magicstring"""
    if magicstring is None:
        magicstring = changegroup.readexactly(fp, 4)
    magic, version = magicstring[0:2], magicstring[2:4]
    if magic != b'HG':
        ui.debug(
            b"error: invalid magic: %r (version %r), should be 'HG'\n"
            % (magic, version)
        )
        raise error.Abort(_(b'not a Mercurial bundle'))
    unbundlerclass = formatmap.get(version)
    if unbundlerclass is None:
        raise error.Abort(_(b'unknown bundle version %s') % version)
    unbundler = unbundlerclass(ui, fp)
    indebug(ui, b'start processing of %s stream' % magicstring)
    return unbundler
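
# Hypothetical usage, reading a bundle from an open binary file object:
#
#   with open(bundlepath, 'rb') as fp:
#       unbundler = getunbundler(ui, fp)
#       for part in unbundler.iterparts():
#           ...  # dispatch on part.type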


class unbundle20(unpackermixin):
    """interpret a bundle2 stream

    This class is fed with a binary stream and yields parts through its
    `iterparts` methods."""

    _magicstring = b'HG20'

    def __init__(self, ui, fp):
        """If header is specified, we do not read it out of the stream."""
        self.ui = ui
        self._compengine = util.compengines.forbundletype(b'UN')
        self._compressed = None
        super(unbundle20, self).__init__(fp)

    @util.propertycache
    def params(self):
        """dictionary of stream level parameters"""
        indebug(self.ui, b'reading bundle2 stream parameters')
        params = {}
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize < 0:
            raise error.BundleValueError(
                b'negative bundle param size: %i' % paramssize
            )
        if paramssize:
            params = self._readexact(paramssize)
            params = self._processallparams(params)
        return params

    def _processallparams(self, paramsblock):
        """process and return the stream level parameters block"""
        params = util.sortdict()
        for p in paramsblock.split(b' '):
            p = p.split(b'=', 1)
            p = [urlreq.unquote(i) for i in p]
            if len(p) < 2:
                p.append(None)
            self._processparam(*p)
            params[p[0]] = p[1]
        return params

    def _processparam(self, name, value):
        """process a parameter, applying its effect if needed

        Parameters starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory, and this function will raise a KeyError when they are
        unknown.

        Note: no options are currently supported. Any input will either be
        ignored or fail.
        """
        if not name:
            raise ValueError(r'empty parameter name')
        if name[0:1] not in pycompat.bytestr(string.ascii_letters):
            raise ValueError(r'non letter first character: %s' % name)
        try:
            handler = b2streamparamsmap[name.lower()]
        except KeyError:
            if name[0:1].islower():
                indebug(self.ui, b"ignoring unknown parameter %s" % name)
            else:
                raise error.BundleUnknownFeatureError(params=(name,))
        else:
            handler(self, name, value)

    def _forwardchunks(self):
        """utility to transfer a bundle2 as binary

        This is made necessary by the fact that the 'getbundle' command over
        'ssh' has no way to know when the reply ends, relying on the bundle
        being interpreted to find its end. This is terrible and we are sorry,
        but we needed to move forward to get general delta enabled.
        """
        yield self._magicstring
        assert b'params' not in vars(self)
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize < 0:
            raise error.BundleValueError(
                b'negative bundle param size: %i' % paramssize
            )
        if paramssize:
            params = self._readexact(paramssize)
            self._processallparams(params)
            # The payload itself is decompressed below, so drop
            # the compression parameter passed down to compensate.
            outparams = []
            for p in params.split(b' '):
                k, v = p.split(b'=', 1)
                if k.lower() != b'compression':
                    outparams.append(p)
            outparams = b' '.join(outparams)
            yield _pack(_fstreamparamsize, len(outparams))
            yield outparams
        else:
            yield _pack(_fstreamparamsize, paramssize)
        # From here on, the payload might need to be decompressed
        self._fp = self._compengine.decompressorreader(self._fp)
        emptycount = 0
        while emptycount < 2:
            # so we can brainlessly loop
            assert _fpartheadersize == _fpayloadsize
            size = self._unpack(_fpartheadersize)[0]
            yield _pack(_fpartheadersize, size)
            if size:
                emptycount = 0
            else:
                emptycount += 1
                continue
            if size == flaginterrupt:
                continue
            elif size < 0:
                raise error.BundleValueError(b'negative chunk size: %i' % size)
            yield self._readexact(size)

    def iterparts(self, seekable=False):
        """yield all parts contained in the stream"""
        cls = seekableunbundlepart if seekable else unbundlepart
        # make sure params have been loaded
        self.params
        # From here on, the payload needs to be decompressed
        self._fp = self._compengine.decompressorreader(self._fp)
        indebug(self.ui, b'start extraction of bundle2 parts')
        headerblock = self._readpartheader()
        while headerblock is not None:
            part = cls(self.ui, headerblock, self._fp)
            yield part
            # Ensure part is fully consumed so we can start reading the next
            # part.
            part.consume()

            headerblock = self._readpartheader()
        indebug(self.ui, b'end of bundle2 stream')

    def _readpartheader(self):
        """reads a part header size and return the bytes blob

        returns None if empty"""
        headersize = self._unpack(_fpartheadersize)[0]
        if headersize < 0:
            raise error.BundleValueError(
                b'negative part header size: %i' % headersize
            )
        indebug(self.ui, b'part header size: %i' % headersize)
        if headersize:
            return self._readexact(headersize)
        return None

    def compressed(self):
        self.params  # load params
        return self._compressed

    def close(self):
        """close underlying file"""
        if util.safehasattr(self._fp, b'close'):
            return self._fp.close()


formatmap = {b'20': unbundle20}

b2streamparamsmap = {}


def b2streamparamhandler(name):
    """register a handler for a stream level parameter"""

    def decorator(func):
        assert name not in b2streamparamsmap
        b2streamparamsmap[name] = func
        return func

    return decorator


@b2streamparamhandler(b'compression')
def processcompression(unbundler, param, value):
    """read compression parameter and install payload decompression"""
    if value not in util.compengines.supportedbundletypes:
        raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
    unbundler._compengine = util.compengines.forbundletype(value)
    if value is not None:
        unbundler._compressed = True
967
967
968
968
969 class bundlepart(object):
969 class bundlepart(object):
970 """A bundle2 part contains application level payload
970 """A bundle2 part contains application level payload
971
971
972 The part `type` is used to route the part to the application level
972 The part `type` is used to route the part to the application level
973 handler.
973 handler.
974
974
975 The part payload is contained in ``part.data``. It could be raw bytes or a
975 The part payload is contained in ``part.data``. It could be raw bytes or a
976 generator of byte chunks.
976 generator of byte chunks.
977
977
978 You can add parameters to the part using the ``addparam`` method.
978 You can add parameters to the part using the ``addparam`` method.
979 Parameters can be either mandatory (default) or advisory. Remote side
979 Parameters can be either mandatory (default) or advisory. Remote side
980 should be able to safely ignore the advisory ones.
980 should be able to safely ignore the advisory ones.
981
981
982 Both data and parameters cannot be modified after the generation has begun.
982 Both data and parameters cannot be modified after the generation has begun.
983 """
983 """
984
984
985 def __init__(
985 def __init__(
986 self,
986 self,
987 parttype,
987 parttype,
988 mandatoryparams=(),
988 mandatoryparams=(),
989 advisoryparams=(),
989 advisoryparams=(),
990 data=b'',
990 data=b'',
991 mandatory=True,
991 mandatory=True,
992 ):
992 ):
993 validateparttype(parttype)
993 validateparttype(parttype)
994 self.id = None
994 self.id = None
995 self.type = parttype
995 self.type = parttype
996 self._data = data
996 self._data = data
997 self._mandatoryparams = list(mandatoryparams)
997 self._mandatoryparams = list(mandatoryparams)
998 self._advisoryparams = list(advisoryparams)
998 self._advisoryparams = list(advisoryparams)
999 # checking for duplicated entries
999 # checking for duplicated entries
1000 self._seenparams = set()
1000 self._seenparams = set()
1001 for pname, __ in self._mandatoryparams + self._advisoryparams:
1001 for pname, __ in self._mandatoryparams + self._advisoryparams:
1002 if pname in self._seenparams:
1002 if pname in self._seenparams:
1003 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1003 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1004 self._seenparams.add(pname)
1004 self._seenparams.add(pname)
1005 # status of the part's generation:
1005 # status of the part's generation:
1006 # - None: not started,
1006 # - None: not started,
1007 # - False: currently generated,
1007 # - False: currently generated,
1008 # - True: generation done.
1008 # - True: generation done.
1009 self._generated = None
1009 self._generated = None
1010 self.mandatory = mandatory
1010 self.mandatory = mandatory
1011
1011
1012 def __repr__(self):
1012 def __repr__(self):
1013 cls = b"%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1013 cls = b"%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1014 return b'<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1014 return b'<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1015 cls,
1015 cls,
1016 id(self),
1016 id(self),
1017 self.id,
1017 self.id,
1018 self.type,
1018 self.type,
1019 self.mandatory,
1019 self.mandatory,
1020 )
1020 )
1021
1021
1022 def copy(self):
1022 def copy(self):
1023 """return a copy of the part
1023 """return a copy of the part
1024
1024
1025 The new part has the very same content but no partid assigned yet.
1025 The new part has the very same content but no partid assigned yet.
1026 Parts with generated data cannot be copied."""
1026 Parts with generated data cannot be copied."""
1027 assert not util.safehasattr(self.data, b'next')
1027 assert not util.safehasattr(self.data, b'next')
1028 return self.__class__(
1028 return self.__class__(
1029 self.type,
1029 self.type,
1030 self._mandatoryparams,
1030 self._mandatoryparams,
1031 self._advisoryparams,
1031 self._advisoryparams,
1032 self._data,
1032 self._data,
1033 self.mandatory,
1033 self.mandatory,
1034 )
1034 )
1035
1035
1036 # methods used to define the part content
1036 # methods used to define the part content
1037 @property
1037 @property
1038 def data(self):
1038 def data(self):
1039 return self._data
1039 return self._data
1040
1040
1041 @data.setter
1041 @data.setter
1042 def data(self, data):
1042 def data(self, data):
1043 if self._generated is not None:
1043 if self._generated is not None:
1044 raise error.ReadOnlyPartError(b'part is being generated')
1044 raise error.ReadOnlyPartError(b'part is being generated')
1045 self._data = data
1045 self._data = data
1046
1046
1047 @property
1047 @property
1048 def mandatoryparams(self):
1048 def mandatoryparams(self):
1049 # make it an immutable tuple to force people through ``addparam``
1049 # make it an immutable tuple to force people through ``addparam``
1050 return tuple(self._mandatoryparams)
1050 return tuple(self._mandatoryparams)
1051
1051
1052 @property
1052 @property
1053 def advisoryparams(self):
1053 def advisoryparams(self):
1054 # make it an immutable tuple to force people through ``addparam``
1054 # make it an immutable tuple to force people through ``addparam``
1055 return tuple(self._advisoryparams)
1055 return tuple(self._advisoryparams)
1056
1056
1057 def addparam(self, name, value=b'', mandatory=True):
1057 def addparam(self, name, value=b'', mandatory=True):
1058 """add a parameter to the part
1058 """add a parameter to the part
1059
1059
1060 If 'mandatory' is set to True, the remote handler must claim support
1060 If 'mandatory' is set to True, the remote handler must claim support
1061 for this parameter or the unbundling will be aborted.
1061 for this parameter or the unbundling will be aborted.
1062
1062
1063 The 'name' and 'value' cannot exceed 255 bytes each.
1063 The 'name' and 'value' cannot exceed 255 bytes each.
1064 """
1064 """
1065 if self._generated is not None:
1065 if self._generated is not None:
1066 raise error.ReadOnlyPartError(b'part is being generated')
1066 raise error.ReadOnlyPartError(b'part is being generated')
1067 if name in self._seenparams:
1067 if name in self._seenparams:
1068 raise ValueError(b'duplicated params: %s' % name)
1068 raise ValueError(b'duplicated params: %s' % name)
1069 self._seenparams.add(name)
1069 self._seenparams.add(name)
1070 params = self._advisoryparams
1070 params = self._advisoryparams
1071 if mandatory:
1071 if mandatory:
1072 params = self._mandatoryparams
1072 params = self._mandatoryparams
1073 params.append((name, value))
1073 params.append((name, value))
1074
1074
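# Illustrative usage (a sketch; the part type, the values and the ``cg``
# object are assumptions made for the example): building a part and
# attaching one mandatory and one advisory parameter through ``addparam``.
#
#     part = bundlepart(b'changegroup', data=cg.getchunks())
#     part.addparam(b'version', b'02')                     # mandatory
#     part.addparam(b'nbchanges', b'42', mandatory=False)  # advisory
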
1075 # methods used to generate the bundle2 stream
1075 # methods used to generate the bundle2 stream
1076 def getchunks(self, ui):
1076 def getchunks(self, ui):
1077 if self._generated is not None:
1077 if self._generated is not None:
1078 raise error.ProgrammingError(b'part can only be consumed once')
1078 raise error.ProgrammingError(b'part can only be consumed once')
1079 self._generated = False
1079 self._generated = False
1080
1080
1081 if ui.debugflag:
1081 if ui.debugflag:
1082 msg = [b'bundle2-output-part: "%s"' % self.type]
1082 msg = [b'bundle2-output-part: "%s"' % self.type]
1083 if not self.mandatory:
1083 if not self.mandatory:
1084 msg.append(b' (advisory)')
1084 msg.append(b' (advisory)')
1085 nbmp = len(self.mandatoryparams)
1085 nbmp = len(self.mandatoryparams)
1086 nbap = len(self.advisoryparams)
1086 nbap = len(self.advisoryparams)
1087 if nbmp or nbap:
1087 if nbmp or nbap:
1088 msg.append(b' (params:')
1088 msg.append(b' (params:')
1089 if nbmp:
1089 if nbmp:
1090 msg.append(b' %i mandatory' % nbmp)
1090 msg.append(b' %i mandatory' % nbmp)
1091 if nbap:
1091 if nbap:
1092 msg.append(b' %i advisory' % nbap)
1092 msg.append(b' %i advisory' % nbap)
1093 msg.append(b')')
1093 msg.append(b')')
1094 if not self.data:
1094 if not self.data:
1095 msg.append(b' empty payload')
1095 msg.append(b' empty payload')
1096 elif util.safehasattr(self.data, b'next') or util.safehasattr(
1096 elif util.safehasattr(self.data, b'next') or util.safehasattr(
1097 self.data, b'__next__'
1097 self.data, b'__next__'
1098 ):
1098 ):
1099 msg.append(b' streamed payload')
1099 msg.append(b' streamed payload')
1100 else:
1100 else:
1101 msg.append(b' %i bytes payload' % len(self.data))
1101 msg.append(b' %i bytes payload' % len(self.data))
1102 msg.append(b'\n')
1102 msg.append(b'\n')
1103 ui.debug(b''.join(msg))
1103 ui.debug(b''.join(msg))
1104
1104
1105 #### header
1105 #### header
1106 if self.mandatory:
1106 if self.mandatory:
1107 parttype = self.type.upper()
1107 parttype = self.type.upper()
1108 else:
1108 else:
1109 parttype = self.type.lower()
1109 parttype = self.type.lower()
1110 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1110 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1111 ## parttype
1111 ## parttype
1112 header = [
1112 header = [
1113 _pack(_fparttypesize, len(parttype)),
1113 _pack(_fparttypesize, len(parttype)),
1114 parttype,
1114 parttype,
1115 _pack(_fpartid, self.id),
1115 _pack(_fpartid, self.id),
1116 ]
1116 ]
1117 ## parameters
1117 ## parameters
1118 # count
1118 # count
1119 manpar = self.mandatoryparams
1119 manpar = self.mandatoryparams
1120 advpar = self.advisoryparams
1120 advpar = self.advisoryparams
1121 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1121 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1122 # size
1122 # size
1123 parsizes = []
1123 parsizes = []
1124 for key, value in manpar:
1124 for key, value in manpar:
1125 parsizes.append(len(key))
1125 parsizes.append(len(key))
1126 parsizes.append(len(value))
1126 parsizes.append(len(value))
1127 for key, value in advpar:
1127 for key, value in advpar:
1128 parsizes.append(len(key))
1128 parsizes.append(len(key))
1129 parsizes.append(len(value))
1129 parsizes.append(len(value))
1130 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1130 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1131 header.append(paramsizes)
1131 header.append(paramsizes)
1132 # key, value
1132 # key, value
1133 for key, value in manpar:
1133 for key, value in manpar:
1134 header.append(key)
1134 header.append(key)
1135 header.append(value)
1135 header.append(value)
1136 for key, value in advpar:
1136 for key, value in advpar:
1137 header.append(key)
1137 header.append(key)
1138 header.append(value)
1138 header.append(value)
1139 ## finalize header
1139 ## finalize header
1140 try:
1140 try:
1141 headerchunk = b''.join(header)
1141 headerchunk = b''.join(header)
1142 except TypeError:
1142 except TypeError:
1143 raise TypeError(
1143 raise TypeError(
1144 r'Found a non-bytes trying to '
1144 r'Found a non-bytes trying to '
1145 r'build bundle part header: %r' % header
1145 r'build bundle part header: %r' % header
1146 )
1146 )
1147 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1147 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1148 yield _pack(_fpartheadersize, len(headerchunk))
1148 yield _pack(_fpartheadersize, len(headerchunk))
1149 yield headerchunk
1149 yield headerchunk
1150 ## payload
1150 ## payload
1151 try:
1151 try:
1152 for chunk in self._payloadchunks():
1152 for chunk in self._payloadchunks():
1153 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1153 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1154 yield _pack(_fpayloadsize, len(chunk))
1154 yield _pack(_fpayloadsize, len(chunk))
1155 yield chunk
1155 yield chunk
1156 except GeneratorExit:
1156 except GeneratorExit:
1157 # GeneratorExit means that nobody is listening for our
1157 # GeneratorExit means that nobody is listening for our
1158 # results anyway, so just bail quickly rather than trying
1158 # results anyway, so just bail quickly rather than trying
1159 # to produce an error part.
1159 # to produce an error part.
1160 ui.debug(b'bundle2-generatorexit\n')
1160 ui.debug(b'bundle2-generatorexit\n')
1161 raise
1161 raise
1162 except BaseException as exc:
1162 except BaseException as exc:
1163 bexc = stringutil.forcebytestr(exc)
1163 bexc = stringutil.forcebytestr(exc)
1164 # backup exception data for later
1164 # backup exception data for later
1165 ui.debug(
1165 ui.debug(
1166 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1166 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1167 )
1167 )
1168 tb = sys.exc_info()[2]
1168 tb = sys.exc_info()[2]
1169 msg = b'unexpected error: %s' % bexc
1169 msg = b'unexpected error: %s' % bexc
1170 interpart = bundlepart(
1170 interpart = bundlepart(
1171 b'error:abort', [(b'message', msg)], mandatory=False
1171 b'error:abort', [(b'message', msg)], mandatory=False
1172 )
1172 )
1173 interpart.id = 0
1173 interpart.id = 0
1174 yield _pack(_fpayloadsize, -1)
1174 yield _pack(_fpayloadsize, -1)
1175 for chunk in interpart.getchunks(ui=ui):
1175 for chunk in interpart.getchunks(ui=ui):
1176 yield chunk
1176 yield chunk
1177 outdebug(ui, b'closing payload chunk')
1177 outdebug(ui, b'closing payload chunk')
1178 # abort current part payload
1178 # abort current part payload
1179 yield _pack(_fpayloadsize, 0)
1179 yield _pack(_fpayloadsize, 0)
1180 pycompat.raisewithtb(exc, tb)
1180 pycompat.raisewithtb(exc, tb)
1181 # end of payload
1181 # end of payload
1182 outdebug(ui, b'closing payload chunk')
1182 outdebug(ui, b'closing payload chunk')
1183 yield _pack(_fpayloadsize, 0)
1183 yield _pack(_fpayloadsize, 0)
1184 self._generated = True
1184 self._generated = True
1185
1185
1186 def _payloadchunks(self):
1186 def _payloadchunks(self):
1187 """yield chunks of a the part payload
1187 """yield chunks of a the part payload
1188
1188
1189 Exists to handle the different methods to provide data to a part."""
1189 Exists to handle the different methods to provide data to a part."""
1190 # we only support fixed size data now.
1190 # we only support fixed size data now.
1191 # This will be improved in the future.
1191 # This will be improved in the future.
1192 if util.safehasattr(self.data, b'next') or util.safehasattr(
1192 if util.safehasattr(self.data, b'next') or util.safehasattr(
1193 self.data, b'__next__'
1193 self.data, b'__next__'
1194 ):
1194 ):
1195 buff = util.chunkbuffer(self.data)
1195 buff = util.chunkbuffer(self.data)
1196 chunk = buff.read(preferedchunksize)
1196 chunk = buff.read(preferedchunksize)
1197 while chunk:
1197 while chunk:
1198 yield chunk
1198 yield chunk
1199 chunk = buff.read(preferedchunksize)
1199 chunk = buff.read(preferedchunksize)
1200 elif len(self.data):
1200 elif len(self.data):
1201 yield self.data
1201 yield self.data
1202
1202
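# Informative sketch of the frames that ``getchunks`` emits for one part,
# assuming the struct formats defined earlier in this module (a one-byte
# type length, a 32-bit part id, 32-bit size frames):
#
#     int32 header size
#     header: uint8 len(parttype) . parttype . uint32 part id
#             . uint8 mandatory param count . uint8 advisory param count
#             . param key/value sizes . param keys and values
#     repeated: int32 chunk size . chunk bytes
#     int32 == 0   closes the payload
#     int32 == -1  (``flaginterrupt`` below) announces an out-of-band part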
1203
1203
1204 flaginterrupt = -1
1204 flaginterrupt = -1
1205
1205
1206
1206
1207 class interrupthandler(unpackermixin):
1207 class interrupthandler(unpackermixin):
1208 """read one part and process it with restricted capability
1208 """read one part and process it with restricted capability
1209
1209
1210 This allows transmitting exceptions raised on the producer side during part
1210 This allows transmitting exceptions raised on the producer side during part
1211 iteration while the consumer is reading a part.
1211 iteration while the consumer is reading a part.
1212
1212
1213 Parts processed in this manner only have access to a ui object."""
1213 Parts processed in this manner only have access to a ui object."""
1214
1214
1215 def __init__(self, ui, fp):
1215 def __init__(self, ui, fp):
1216 super(interrupthandler, self).__init__(fp)
1216 super(interrupthandler, self).__init__(fp)
1217 self.ui = ui
1217 self.ui = ui
1218
1218
1219 def _readpartheader(self):
1219 def _readpartheader(self):
1220 """reads a part header size and return the bytes blob
1220 """reads a part header size and return the bytes blob
1221
1221
1222 returns None if empty"""
1222 returns None if empty"""
1223 headersize = self._unpack(_fpartheadersize)[0]
1223 headersize = self._unpack(_fpartheadersize)[0]
1224 if headersize < 0:
1224 if headersize < 0:
1225 raise error.BundleValueError(
1225 raise error.BundleValueError(
1226 b'negative part header size: %i' % headersize
1226 b'negative part header size: %i' % headersize
1227 )
1227 )
1228 indebug(self.ui, b'part header size: %i\n' % headersize)
1228 indebug(self.ui, b'part header size: %i\n' % headersize)
1229 if headersize:
1229 if headersize:
1230 return self._readexact(headersize)
1230 return self._readexact(headersize)
1231 return None
1231 return None
1232
1232
1233 def __call__(self):
1233 def __call__(self):
1234
1234
1235 self.ui.debug(
1235 self.ui.debug(
1236 b'bundle2-input-stream-interrupt: opening out of band context\n'
1236 b'bundle2-input-stream-interrupt: opening out of band context\n'
1237 )
1237 )
1238 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1238 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1239 headerblock = self._readpartheader()
1239 headerblock = self._readpartheader()
1240 if headerblock is None:
1240 if headerblock is None:
1241 indebug(self.ui, b'no part found during interruption.')
1241 indebug(self.ui, b'no part found during interruption.')
1242 return
1242 return
1243 part = unbundlepart(self.ui, headerblock, self._fp)
1243 part = unbundlepart(self.ui, headerblock, self._fp)
1244 op = interruptoperation(self.ui)
1244 op = interruptoperation(self.ui)
1245 hardabort = False
1245 hardabort = False
1246 try:
1246 try:
1247 _processpart(op, part)
1247 _processpart(op, part)
1248 except (SystemExit, KeyboardInterrupt):
1248 except (SystemExit, KeyboardInterrupt):
1249 hardabort = True
1249 hardabort = True
1250 raise
1250 raise
1251 finally:
1251 finally:
1252 if not hardabort:
1252 if not hardabort:
1253 part.consume()
1253 part.consume()
1254 self.ui.debug(
1254 self.ui.debug(
1255 b'bundle2-input-stream-interrupt: closing out of band context\n'
1255 b'bundle2-input-stream-interrupt: closing out of band context\n'
1256 )
1256 )
1257
1257
1258
1258
1259 class interruptoperation(object):
1259 class interruptoperation(object):
1260 """A limited operation to be use by part handler during interruption
1260 """A limited operation to be use by part handler during interruption
1261
1261
1262 It only have access to an ui object.
1262 It only have access to an ui object.
1263 """
1263 """
1264
1264
1265 def __init__(self, ui):
1265 def __init__(self, ui):
1266 self.ui = ui
1266 self.ui = ui
1267 self.reply = None
1267 self.reply = None
1268 self.captureoutput = False
1268 self.captureoutput = False
1269
1269
1270 @property
1270 @property
1271 def repo(self):
1271 def repo(self):
1272 raise error.ProgrammingError(b'no repo access from stream interruption')
1272 raise error.ProgrammingError(b'no repo access from stream interruption')
1273
1273
1274 def gettransaction(self):
1274 def gettransaction(self):
1275 raise TransactionUnavailable(b'no repo access from stream interruption')
1275 raise TransactionUnavailable(b'no repo access from stream interruption')
1276
1276
1277
1277
1278 def decodepayloadchunks(ui, fh):
1278 def decodepayloadchunks(ui, fh):
1279 """Reads bundle2 part payload data into chunks.
1279 """Reads bundle2 part payload data into chunks.
1280
1280
1281 Part payload data consists of framed chunks. This function takes
1281 Part payload data consists of framed chunks. This function takes
1282 a file handle and emits those chunks.
1282 a file handle and emits those chunks.
1283 """
1283 """
1284 dolog = ui.configbool(b'devel', b'bundle2.debug')
1284 dolog = ui.configbool(b'devel', b'bundle2.debug')
1285 debug = ui.debug
1285 debug = ui.debug
1286
1286
1287 headerstruct = struct.Struct(_fpayloadsize)
1287 headerstruct = struct.Struct(_fpayloadsize)
1288 headersize = headerstruct.size
1288 headersize = headerstruct.size
1289 unpack = headerstruct.unpack
1289 unpack = headerstruct.unpack
1290
1290
1291 readexactly = changegroup.readexactly
1291 readexactly = changegroup.readexactly
1292 read = fh.read
1292 read = fh.read
1293
1293
1294 chunksize = unpack(readexactly(fh, headersize))[0]
1294 chunksize = unpack(readexactly(fh, headersize))[0]
1295 indebug(ui, b'payload chunk size: %i' % chunksize)
1295 indebug(ui, b'payload chunk size: %i' % chunksize)
1296
1296
1297 # changegroup.readexactly() is inlined below for performance.
1297 # changegroup.readexactly() is inlined below for performance.
1298 while chunksize:
1298 while chunksize:
1299 if chunksize >= 0:
1299 if chunksize >= 0:
1300 s = read(chunksize)
1300 s = read(chunksize)
1301 if len(s) < chunksize:
1301 if len(s) < chunksize:
1302 raise error.Abort(
1302 raise error.Abort(
1303 _(
1303 _(
1304 b'stream ended unexpectedly '
1304 b'stream ended unexpectedly '
1305 b'(got %d bytes, expected %d)'
1305 b'(got %d bytes, expected %d)'
1306 )
1306 )
1307 % (len(s), chunksize)
1307 % (len(s), chunksize)
1308 )
1308 )
1309
1309
1310 yield s
1310 yield s
1311 elif chunksize == flaginterrupt:
1311 elif chunksize == flaginterrupt:
1312 # Interrupt "signal" detected. The regular stream is interrupted
1312 # Interrupt "signal" detected. The regular stream is interrupted
1313 # and a bundle2 part follows. Consume it.
1313 # and a bundle2 part follows. Consume it.
1314 interrupthandler(ui, fh)()
1314 interrupthandler(ui, fh)()
1315 else:
1315 else:
1316 raise error.BundleValueError(
1316 raise error.BundleValueError(
1317 b'negative payload chunk size: %s' % chunksize
1317 b'negative payload chunk size: %s' % chunksize
1318 )
1318 )
1319
1319
1320 s = read(headersize)
1320 s = read(headersize)
1321 if len(s) < headersize:
1321 if len(s) < headersize:
1322 raise error.Abort(
1322 raise error.Abort(
1323 _(b'stream ended unexpectedly (got %d bytes, expected %d)')
1323 _(b'stream ended unexpectedly (got %d bytes, expected %d)')
1324 % (len(s), headersize)
1324 % (len(s), headersize)
1325 )
1325 )
1326
1326
1327 chunksize = unpack(s)[0]
1327 chunksize = unpack(s)[0]
1328
1328
1329 # indebug() inlined for performance.
1329 # indebug() inlined for performance.
1330 if dolog:
1330 if dolog:
1331 debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
1331 debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
1332
1332
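# Usage sketch (illustrative; ``fh`` and ``consume`` are placeholders):
# draining one part's framed payload straight from an open bundle stream.
# Interrupt parts embedded in the stream are processed transparently by
# the generator above before iteration resumes.
#
#     for chunk in decodepayloadchunks(ui, fh):
#         consume(chunk)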
1333
1333
1334 class unbundlepart(unpackermixin):
1334 class unbundlepart(unpackermixin):
1335 """a bundle part read from a bundle"""
1335 """a bundle part read from a bundle"""
1336
1336
1337 def __init__(self, ui, header, fp):
1337 def __init__(self, ui, header, fp):
1338 super(unbundlepart, self).__init__(fp)
1338 super(unbundlepart, self).__init__(fp)
1339 self._seekable = util.safehasattr(fp, b'seek') and util.safehasattr(
1339 self._seekable = util.safehasattr(fp, b'seek') and util.safehasattr(
1340 fp, b'tell'
1340 fp, b'tell'
1341 )
1341 )
1342 self.ui = ui
1342 self.ui = ui
1343 # unbundle state attr
1343 # unbundle state attr
1344 self._headerdata = header
1344 self._headerdata = header
1345 self._headeroffset = 0
1345 self._headeroffset = 0
1346 self._initialized = False
1346 self._initialized = False
1347 self.consumed = False
1347 self.consumed = False
1348 # part data
1348 # part data
1349 self.id = None
1349 self.id = None
1350 self.type = None
1350 self.type = None
1351 self.mandatoryparams = None
1351 self.mandatoryparams = None
1352 self.advisoryparams = None
1352 self.advisoryparams = None
1353 self.params = None
1353 self.params = None
1354 self.mandatorykeys = ()
1354 self.mandatorykeys = ()
1355 self._readheader()
1355 self._readheader()
1356 self._mandatory = None
1356 self._mandatory = None
1357 self._pos = 0
1357 self._pos = 0
1358
1358
1359 def _fromheader(self, size):
1359 def _fromheader(self, size):
1360 """return the next <size> byte from the header"""
1360 """return the next <size> byte from the header"""
1361 offset = self._headeroffset
1361 offset = self._headeroffset
1362 data = self._headerdata[offset : (offset + size)]
1362 data = self._headerdata[offset : (offset + size)]
1363 self._headeroffset = offset + size
1363 self._headeroffset = offset + size
1364 return data
1364 return data
1365
1365
1366 def _unpackheader(self, format):
1366 def _unpackheader(self, format):
1367 """read given format from header
1367 """read given format from header
1368
1368
1369 This automatically computes the size of the format to read.
1369 This automatically computes the size of the format to read.
1370 data = self._fromheader(struct.calcsize(format))
1370 data = self._fromheader(struct.calcsize(format))
1371 return _unpack(format, data)
1371 return _unpack(format, data)
1372
1372
1373 def _initparams(self, mandatoryparams, advisoryparams):
1373 def _initparams(self, mandatoryparams, advisoryparams):
1374 """internal function to setup all logic related parameters"""
1374 """internal function to setup all logic related parameters"""
1375 # make it read only to prevent people touching it by mistake.
1375 # make it read only to prevent people touching it by mistake.
1376 self.mandatoryparams = tuple(mandatoryparams)
1376 self.mandatoryparams = tuple(mandatoryparams)
1377 self.advisoryparams = tuple(advisoryparams)
1377 self.advisoryparams = tuple(advisoryparams)
1378 # user friendly UI
1378 # user friendly UI
1379 self.params = util.sortdict(self.mandatoryparams)
1379 self.params = util.sortdict(self.mandatoryparams)
1380 self.params.update(self.advisoryparams)
1380 self.params.update(self.advisoryparams)
1381 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
1381 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
1382
1382
1383 def _readheader(self):
1383 def _readheader(self):
1384 """read the header and setup the object"""
1384 """read the header and setup the object"""
1385 typesize = self._unpackheader(_fparttypesize)[0]
1385 typesize = self._unpackheader(_fparttypesize)[0]
1386 self.type = self._fromheader(typesize)
1386 self.type = self._fromheader(typesize)
1387 indebug(self.ui, b'part type: "%s"' % self.type)
1387 indebug(self.ui, b'part type: "%s"' % self.type)
1388 self.id = self._unpackheader(_fpartid)[0]
1388 self.id = self._unpackheader(_fpartid)[0]
1389 indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
1389 indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
1390 # extract mandatory bit from type
1390 # extract mandatory bit from type
1391 self.mandatory = self.type != self.type.lower()
1391 self.mandatory = self.type != self.type.lower()
1392 self.type = self.type.lower()
1392 self.type = self.type.lower()
1393 ## reading parameters
1393 ## reading parameters
1394 # param count
1394 # param count
1395 mancount, advcount = self._unpackheader(_fpartparamcount)
1395 mancount, advcount = self._unpackheader(_fpartparamcount)
1396 indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
1396 indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
1397 # param size
1397 # param size
1398 fparamsizes = _makefpartparamsizes(mancount + advcount)
1398 fparamsizes = _makefpartparamsizes(mancount + advcount)
1399 paramsizes = self._unpackheader(fparamsizes)
1399 paramsizes = self._unpackheader(fparamsizes)
1400 # make it a list of pairs again
1400 # make it a list of pairs again
1401 paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
1401 paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
1402 # split mandatory from advisory
1402 # split mandatory from advisory
1403 mansizes = paramsizes[:mancount]
1403 mansizes = paramsizes[:mancount]
1404 advsizes = paramsizes[mancount:]
1404 advsizes = paramsizes[mancount:]
1405 # retrieve param value
1405 # retrieve param value
1406 manparams = []
1406 manparams = []
1407 for key, value in mansizes:
1407 for key, value in mansizes:
1408 manparams.append((self._fromheader(key), self._fromheader(value)))
1408 manparams.append((self._fromheader(key), self._fromheader(value)))
1409 advparams = []
1409 advparams = []
1410 for key, value in advsizes:
1410 for key, value in advsizes:
1411 advparams.append((self._fromheader(key), self._fromheader(value)))
1411 advparams.append((self._fromheader(key), self._fromheader(value)))
1412 self._initparams(manparams, advparams)
1412 self._initparams(manparams, advparams)
1413 ## part payload
1413 ## part payload
1414 self._payloadstream = util.chunkbuffer(self._payloadchunks())
1414 self._payloadstream = util.chunkbuffer(self._payloadchunks())
1415 # the header has been fully read; mark the part as initialized
1415 # the header has been fully read; mark the part as initialized
1416 self._initialized = True
1416 self._initialized = True
1417
1417
1418 def _payloadchunks(self):
1418 def _payloadchunks(self):
1419 """Generator of decoded chunks in the payload."""
1419 """Generator of decoded chunks in the payload."""
1420 return decodepayloadchunks(self.ui, self._fp)
1420 return decodepayloadchunks(self.ui, self._fp)
1421
1421
1422 def consume(self):
1422 def consume(self):
1423 """Read the part payload until completion.
1423 """Read the part payload until completion.
1424
1424
1425 By consuming the part data, the underlying stream read offset will
1425 By consuming the part data, the underlying stream read offset will
1426 be advanced to the next part (or end of stream).
1426 be advanced to the next part (or end of stream).
1427 """
1427 """
1428 if self.consumed:
1428 if self.consumed:
1429 return
1429 return
1430
1430
1431 chunk = self.read(32768)
1431 chunk = self.read(32768)
1432 while chunk:
1432 while chunk:
1433 self._pos += len(chunk)
1433 self._pos += len(chunk)
1434 chunk = self.read(32768)
1434 chunk = self.read(32768)
1435
1435
1436 def read(self, size=None):
1436 def read(self, size=None):
1437 """read payload data"""
1437 """read payload data"""
1438 if not self._initialized:
1438 if not self._initialized:
1439 self._readheader()
1439 self._readheader()
1440 if size is None:
1440 if size is None:
1441 data = self._payloadstream.read()
1441 data = self._payloadstream.read()
1442 else:
1442 else:
1443 data = self._payloadstream.read(size)
1443 data = self._payloadstream.read(size)
1444 self._pos += len(data)
1444 self._pos += len(data)
1445 if size is None or len(data) < size:
1445 if size is None or len(data) < size:
1446 if not self.consumed and self._pos:
1446 if not self.consumed and self._pos:
1447 self.ui.debug(
1447 self.ui.debug(
1448 b'bundle2-input-part: total payload size %i\n' % self._pos
1448 b'bundle2-input-part: total payload size %i\n' % self._pos
1449 )
1449 )
1450 self.consumed = True
1450 self.consumed = True
1451 return data
1451 return data
1452
1452
1453
1453
1454 class seekableunbundlepart(unbundlepart):
1454 class seekableunbundlepart(unbundlepart):
1455 """A bundle2 part in a bundle that is seekable.
1455 """A bundle2 part in a bundle that is seekable.
1456
1456
1457 Regular ``unbundlepart`` instances can only be read once. This class
1457 Regular ``unbundlepart`` instances can only be read once. This class
1458 extends ``unbundlepart`` to enable bi-directional seeking within the
1458 extends ``unbundlepart`` to enable bi-directional seeking within the
1459 part.
1459 part.
1460
1460
1461 Bundle2 part data consists of framed chunks. Offsets when seeking
1461 Bundle2 part data consists of framed chunks. Offsets when seeking
1462 refer to the decoded data, not the offsets in the underlying bundle2
1462 refer to the decoded data, not the offsets in the underlying bundle2
1463 stream.
1463 stream.
1464
1464
1465 To facilitate quickly seeking within the decoded data, instances of this
1465 To facilitate quickly seeking within the decoded data, instances of this
1466 class maintain a mapping between offsets in the underlying stream and
1466 class maintain a mapping between offsets in the underlying stream and
1467 the decoded payload. This mapping will consume memory in proportion
1467 the decoded payload. This mapping will consume memory in proportion
1468 to the number of chunks within the payload (which almost certainly
1468 to the number of chunks within the payload (which almost certainly
1469 increases in proportion with the size of the part).
1469 increases in proportion with the size of the part).
1470 """
1470 """
1471
1471
1472 def __init__(self, ui, header, fp):
1472 def __init__(self, ui, header, fp):
1473 # (payload, file) offsets for chunk starts.
1473 # (payload, file) offsets for chunk starts.
1474 self._chunkindex = []
1474 self._chunkindex = []
1475
1475
1476 super(seekableunbundlepart, self).__init__(ui, header, fp)
1476 super(seekableunbundlepart, self).__init__(ui, header, fp)
1477
1477
1478 def _payloadchunks(self, chunknum=0):
1478 def _payloadchunks(self, chunknum=0):
1479 '''seek to specified chunk and start yielding data'''
1479 '''seek to specified chunk and start yielding data'''
1480 if len(self._chunkindex) == 0:
1480 if len(self._chunkindex) == 0:
1481 assert chunknum == 0, b'Must start with chunk 0'
1481 assert chunknum == 0, b'Must start with chunk 0'
1482 self._chunkindex.append((0, self._tellfp()))
1482 self._chunkindex.append((0, self._tellfp()))
1483 else:
1483 else:
1484 assert chunknum < len(self._chunkindex), (
1484 assert chunknum < len(self._chunkindex), (
1485 b'Unknown chunk %d' % chunknum
1485 b'Unknown chunk %d' % chunknum
1486 )
1486 )
1487 self._seekfp(self._chunkindex[chunknum][1])
1487 self._seekfp(self._chunkindex[chunknum][1])
1488
1488
1489 pos = self._chunkindex[chunknum][0]
1489 pos = self._chunkindex[chunknum][0]
1490
1490
1491 for chunk in decodepayloadchunks(self.ui, self._fp):
1491 for chunk in decodepayloadchunks(self.ui, self._fp):
1492 chunknum += 1
1492 chunknum += 1
1493 pos += len(chunk)
1493 pos += len(chunk)
1494 if chunknum == len(self._chunkindex):
1494 if chunknum == len(self._chunkindex):
1495 self._chunkindex.append((pos, self._tellfp()))
1495 self._chunkindex.append((pos, self._tellfp()))
1496
1496
1497 yield chunk
1497 yield chunk
1498
1498
1499 def _findchunk(self, pos):
1499 def _findchunk(self, pos):
1500 '''for a given payload position, return a chunk number and offset'''
1500 '''for a given payload position, return a chunk number and offset'''
1501 for chunk, (ppos, fpos) in enumerate(self._chunkindex):
1501 for chunk, (ppos, fpos) in enumerate(self._chunkindex):
1502 if ppos == pos:
1502 if ppos == pos:
1503 return chunk, 0
1503 return chunk, 0
1504 elif ppos > pos:
1504 elif ppos > pos:
1505 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
1505 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
1506 raise ValueError(b'Unknown chunk')
1506 raise ValueError(b'Unknown chunk')
1507
1507
1508 def tell(self):
1508 def tell(self):
1509 return self._pos
1509 return self._pos
1510
1510
1511 def seek(self, offset, whence=os.SEEK_SET):
1511 def seek(self, offset, whence=os.SEEK_SET):
1512 if whence == os.SEEK_SET:
1512 if whence == os.SEEK_SET:
1513 newpos = offset
1513 newpos = offset
1514 elif whence == os.SEEK_CUR:
1514 elif whence == os.SEEK_CUR:
1515 newpos = self._pos + offset
1515 newpos = self._pos + offset
1516 elif whence == os.SEEK_END:
1516 elif whence == os.SEEK_END:
1517 if not self.consumed:
1517 if not self.consumed:
1518 # Can't use self.consume() here because it advances self._pos.
1518 # Can't use self.consume() here because it advances self._pos.
1519 chunk = self.read(32768)
1519 chunk = self.read(32768)
1520 while chunk:
1520 while chunk:
1521 chunk = self.read(32768)
1521 chunk = self.read(32768)
1522 newpos = self._chunkindex[-1][0] - offset
1522 newpos = self._chunkindex[-1][0] - offset
1523 else:
1523 else:
1524 raise ValueError(b'Unknown whence value: %r' % (whence,))
1524 raise ValueError(b'Unknown whence value: %r' % (whence,))
1525
1525
1526 if newpos > self._chunkindex[-1][0] and not self.consumed:
1526 if newpos > self._chunkindex[-1][0] and not self.consumed:
1527 # Can't use self.consume() here because it advances self._pos.
1527 # Can't use self.consume() here because it advances self._pos.
1528 chunk = self.read(32768)
1528 chunk = self.read(32768)
1529 while chunk:
1529 while chunk:
1530 chunk = self.read(32768)
1530 chunk = self.read(32768)
1531
1531
1532 if not 0 <= newpos <= self._chunkindex[-1][0]:
1532 if not 0 <= newpos <= self._chunkindex[-1][0]:
1533 raise ValueError(b'Offset out of range')
1533 raise ValueError(b'Offset out of range')
1534
1534
1535 if self._pos != newpos:
1535 if self._pos != newpos:
1536 chunk, internaloffset = self._findchunk(newpos)
1536 chunk, internaloffset = self._findchunk(newpos)
1537 self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
1537 self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
1538 adjust = self.read(internaloffset)
1538 adjust = self.read(internaloffset)
1539 if len(adjust) != internaloffset:
1539 if len(adjust) != internaloffset:
1540 raise error.Abort(_(b'Seek failed\n'))
1540 raise error.Abort(_(b'Seek failed\n'))
1541 self._pos = newpos
1541 self._pos = newpos
1542
1542
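# Worked example (informative): a payload framed as chunks of 10, 20 and
# 5 bytes whose first size frame sits at file offset 100 grows
# ``_chunkindex`` to
#
#     [(0, 100), (10, 114), (30, 138), (35, 147)]
#
# since each recorded file offset also covers the 4-byte size frame
# preceding the chunk. seek(12) then resolves through
# _findchunk(12) -> (1, 2): restart decoding at chunk 1 and discard
# 2 decoded bytes to adjust.
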
1543 def _seekfp(self, offset, whence=0):
1543 def _seekfp(self, offset, whence=0):
1544 """move the underlying file pointer
1544 """move the underlying file pointer
1545
1545
1546 This method is meant for internal usage by the bundle2 protocol only.
1546 This method is meant for internal usage by the bundle2 protocol only.
1547 It directly manipulates the low-level stream, including bundle2-level
1547 It directly manipulates the low-level stream, including bundle2-level
1548 instructions.
1548 instructions.
1549
1549
1550 Do not use it to implement higher-level logic or methods."""
1550 Do not use it to implement higher-level logic or methods."""
1551 if self._seekable:
1551 if self._seekable:
1552 return self._fp.seek(offset, whence)
1552 return self._fp.seek(offset, whence)
1553 else:
1553 else:
1554 raise NotImplementedError(_(b'File pointer is not seekable'))
1554 raise NotImplementedError(_(b'File pointer is not seekable'))
1555
1555
1556 def _tellfp(self):
1556 def _tellfp(self):
1557 """return the file offset, or None if file is not seekable
1557 """return the file offset, or None if file is not seekable
1558
1558
1559 This method is meant for internal usage by the bundle2 protocol only.
1559 This method is meant for internal usage by the bundle2 protocol only.
1560 It directly manipulates the low-level stream, including bundle2-level
1560 It directly manipulates the low-level stream, including bundle2-level
1561 instructions.
1561 instructions.
1562
1562
1563 Do not use it to implement higher-level logic or methods."""
1563 Do not use it to implement higher-level logic or methods."""
1564 if self._seekable:
1564 if self._seekable:
1565 try:
1565 try:
1566 return self._fp.tell()
1566 return self._fp.tell()
1567 except IOError as e:
1567 except IOError as e:
1568 if e.errno == errno.ESPIPE:
1568 if e.errno == errno.ESPIPE:
1569 self._seekable = False
1569 self._seekable = False
1570 else:
1570 else:
1571 raise
1571 raise
1572 return None
1572 return None
1573
1573
1574
1574
1575 # These are only the static capabilities.
1575 # These are only the static capabilities.
1576 # Check the 'getrepocaps' function for the rest.
1576 # Check the 'getrepocaps' function for the rest.
1577 capabilities = {
1577 capabilities = {
1578 b'HG20': (),
1578 b'HG20': (),
1579 b'bookmarks': (),
1579 b'bookmarks': (),
1580 b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
1580 b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
1581 b'listkeys': (),
1581 b'listkeys': (),
1582 b'pushkey': (),
1582 b'pushkey': (),
1583 b'digests': tuple(sorted(util.DIGESTS.keys())),
1583 b'digests': tuple(sorted(util.DIGESTS.keys())),
1584 b'remote-changegroup': (b'http', b'https'),
1584 b'remote-changegroup': (b'http', b'https'),
1585 b'hgtagsfnodes': (),
1585 b'hgtagsfnodes': (),
1586 b'rev-branch-cache': (),
1586 b'rev-branch-cache': (),
1587 b'phases': (b'heads',),
1587 b'phases': (b'heads',),
1588 b'stream': (b'v2',),
1588 b'stream': (b'v2',),
1589 }
1589 }
1590
1590
1591
1591
1592 def getrepocaps(repo, allowpushback=False, role=None):
1592 def getrepocaps(repo, allowpushback=False, role=None):
1593 """return the bundle2 capabilities for a given repo
1593 """return the bundle2 capabilities for a given repo
1594
1594
1595 Exists to allow extensions (like evolution) to mutate the capabilities.
1595 Exists to allow extensions (like evolution) to mutate the capabilities.
1596
1596
1597 The returned value is used for servers advertising their capabilities as
1597 The returned value is used for servers advertising their capabilities as
1598 well as clients advertising their capabilities to servers as part of
1598 well as clients advertising their capabilities to servers as part of
1599 bundle2 requests. The ``role`` argument specifies which is which.
1599 bundle2 requests. The ``role`` argument specifies which is which.
1600 """
1600 """
1601 if role not in (b'client', b'server'):
1601 if role not in (b'client', b'server'):
1602 raise error.ProgrammingError(b'role argument must be client or server')
1602 raise error.ProgrammingError(b'role argument must be client or server')
1603
1603
1604 caps = capabilities.copy()
1604 caps = capabilities.copy()
1605 caps[b'changegroup'] = tuple(
1605 caps[b'changegroup'] = tuple(
1606 sorted(changegroup.supportedincomingversions(repo))
1606 sorted(changegroup.supportedincomingversions(repo))
1607 )
1607 )
1608 if obsolete.isenabled(repo, obsolete.exchangeopt):
1608 if obsolete.isenabled(repo, obsolete.exchangeopt):
1609 supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
1609 supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
1610 caps[b'obsmarkers'] = supportedformat
1610 caps[b'obsmarkers'] = supportedformat
1611 if allowpushback:
1611 if allowpushback:
1612 caps[b'pushback'] = ()
1612 caps[b'pushback'] = ()
1613 cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
1613 cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
1614 if cpmode == b'check-related':
1614 if cpmode == b'check-related':
1615 caps[b'checkheads'] = (b'related',)
1615 caps[b'checkheads'] = (b'related',)
1616 if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
1616 if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
1617 caps.pop(b'phases')
1617 caps.pop(b'phases')
1618
1618
1619 # Don't advertise stream clone support in server mode if not configured.
1619 # Don't advertise stream clone support in server mode if not configured.
1620 if role == b'server':
1620 if role == b'server':
1621 streamsupported = repo.ui.configbool(
1621 streamsupported = repo.ui.configbool(
1622 b'server', b'uncompressed', untrusted=True
1622 b'server', b'uncompressed', untrusted=True
1623 )
1623 )
1624 featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
1624 featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
1625
1625
1626 if not streamsupported or not featuresupported:
1626 if not streamsupported or not featuresupported:
1627 caps.pop(b'stream')
1627 caps.pop(b'stream')
1628 # Else always advertise support on client, because payload support
1628 # Else always advertise support on client, because payload support
1629 # should always be advertised.
1629 # should always be advertised.
1630
1630
1631 return caps
1631 return caps
1632
1632
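# Sketch (illustrative) of how the two roles typically use this function;
# ``encodecaps`` is defined earlier in this module:
#
#     servercaps = getrepocaps(repo, role=b'server')
#     blob = encodecaps(servercaps)  # advertised in the server capabilities
#     clientcaps = getrepocaps(repo, allowpushback=True, role=b'client')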
1633
1633
1634 def bundle2caps(remote):
1634 def bundle2caps(remote):
1635 """return the bundle capabilities of a peer as dict"""
1635 """return the bundle capabilities of a peer as dict"""
1636 raw = remote.capable(b'bundle2')
1636 raw = remote.capable(b'bundle2')
1637 if not raw and raw != b'':
1637 if not raw and raw != b'':
1638 return {}
1638 return {}
1639 capsblob = urlreq.unquote(remote.capable(b'bundle2'))
1639 capsblob = urlreq.unquote(remote.capable(b'bundle2'))
1640 return decodecaps(capsblob)
1640 return decodecaps(capsblob)
1641
1641
1642
1642
1643 def obsmarkersversion(caps):
1643 def obsmarkersversion(caps):
1644 """extract the list of supported obsmarkers versions from a bundle2caps dict
1644 """extract the list of supported obsmarkers versions from a bundle2caps dict
1645 """
1645 """
1646 obscaps = caps.get(b'obsmarkers', ())
1646 obscaps = caps.get(b'obsmarkers', ())
1647 return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
1647 return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
1648
1648
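# Example (informative):
#
#     obsmarkersversion({b'obsmarkers': (b'V0', b'V1')})  ->  [0, 1]
#     obsmarkersversion({})                               ->  []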
1649
1649
1650 def writenewbundle(
1650 def writenewbundle(
1651 ui,
1651 ui,
1652 repo,
1652 repo,
1653 source,
1653 source,
1654 filename,
1654 filename,
1655 bundletype,
1655 bundletype,
1656 outgoing,
1656 outgoing,
1657 opts,
1657 opts,
1658 vfs=None,
1658 vfs=None,
1659 compression=None,
1659 compression=None,
1660 compopts=None,
1660 compopts=None,
1661 ):
1661 ):
1662 if bundletype.startswith(b'HG10'):
1662 if bundletype.startswith(b'HG10'):
1663 cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
1663 cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
1664 return writebundle(
1664 return writebundle(
1665 ui,
1665 ui,
1666 cg,
1666 cg,
1667 filename,
1667 filename,
1668 bundletype,
1668 bundletype,
1669 vfs=vfs,
1669 vfs=vfs,
1670 compression=compression,
1670 compression=compression,
1671 compopts=compopts,
1671 compopts=compopts,
1672 )
1672 )
1673 elif not bundletype.startswith(b'HG20'):
1673 elif not bundletype.startswith(b'HG20'):
1674 raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
1674 raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
1675
1675
1676 caps = {}
1676 caps = {}
1677 if b'obsolescence' in opts:
1677 if b'obsolescence' in opts:
1678 caps[b'obsmarkers'] = (b'V1',)
1678 caps[b'obsmarkers'] = (b'V1',)
1679 bundle = bundle20(ui, caps)
1679 bundle = bundle20(ui, caps)
1680 bundle.setcompression(compression, compopts)
1680 bundle.setcompression(compression, compopts)
1681 _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
1681 _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
1682 chunkiter = bundle.getchunks()
1682 chunkiter = bundle.getchunks()
1683
1683
1684 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1684 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1685
1685
1686
1686
1687 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
1687 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
1688 # We should eventually reconcile this logic with the one behind
1688 # We should eventually reconcile this logic with the one behind
1689 # 'exchange.getbundle2partsgenerator'.
1689 # 'exchange.getbundle2partsgenerator'.
1690 #
1690 #
1691 # The type of input from 'getbundle' and 'writenewbundle' are a bit
1691 # The type of input from 'getbundle' and 'writenewbundle' are a bit
1692 # different right now. So we keep them separated for now for the sake of
1692 # different right now. So we keep them separated for now for the sake of
1693 # simplicity.
1693 # simplicity.
1694
1694
1695 # we might not always want a changegroup in such a bundle, for example in
1695 # we might not always want a changegroup in such a bundle, for example in
1696 # stream bundles
1696 # stream bundles
1697 if opts.get(b'changegroup', True):
1697 if opts.get(b'changegroup', True):
1698 cgversion = opts.get(b'cg.version')
1698 cgversion = opts.get(b'cg.version')
1699 if cgversion is None:
1699 if cgversion is None:
1700 cgversion = changegroup.safeversion(repo)
1700 cgversion = changegroup.safeversion(repo)
1701 cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
1701 cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
1702 part = bundler.newpart(b'changegroup', data=cg.getchunks())
1702 part = bundler.newpart(b'changegroup', data=cg.getchunks())
1703 part.addparam(b'version', cg.version)
1703 part.addparam(b'version', cg.version)
1704 if b'clcount' in cg.extras:
1704 if b'clcount' in cg.extras:
1705 part.addparam(
1705 part.addparam(
1706 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1706 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1707 )
1707 )
1708 if opts.get(b'phases') and repo.revs(
1708 if opts.get(b'phases') and repo.revs(
1709 b'%ln and secret()', outgoing.missingheads
1709 b'%ln and secret()', outgoing.missingheads
1710 ):
1710 ):
1711 part.addparam(
1711 part.addparam(
1712 b'targetphase', b'%d' % phases.secret, mandatory=False
1712 b'targetphase', b'%d' % phases.secret, mandatory=False
1713 )
1713 )
1714
1714
1715 if opts.get(b'streamv2', False):
1715 if opts.get(b'streamv2', False):
1716 addpartbundlestream2(bundler, repo, stream=True)
1716 addpartbundlestream2(bundler, repo, stream=True)
1717
1717
1718 if opts.get(b'tagsfnodescache', True):
1718 if opts.get(b'tagsfnodescache', True):
1719 addparttagsfnodescache(repo, bundler, outgoing)
1719 addparttagsfnodescache(repo, bundler, outgoing)
1720
1720
1721 if opts.get(b'revbranchcache', True):
1721 if opts.get(b'revbranchcache', True):
1722 addpartrevbranchcache(repo, bundler, outgoing)
1722 addpartrevbranchcache(repo, bundler, outgoing)
1723
1723
1724 if opts.get(b'obsolescence', False):
1724 if opts.get(b'obsolescence', False):
1725 obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
1725 obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
1726 buildobsmarkerspart(bundler, obsmarkers)
1726 buildobsmarkerspart(bundler, obsmarkers)
1727
1727
1728 if opts.get(b'phases', False):
1728 if opts.get(b'phases', False):
1729 headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
1729 headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
1730 phasedata = phases.binaryencode(headsbyphase)
1730 phasedata = phases.binaryencode(headsbyphase)
1731 bundler.newpart(b'phase-heads', data=phasedata)
1731 bundler.newpart(b'phase-heads', data=phasedata)
1732
1732
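# Illustrative shape of the ``opts`` consumed above (keys as read by this
# function; the values are made up): a v2 changegroup bundle that also
# carries phase and obsolescence data.
#
#     opts = {
#         b'changegroup': True,
#         b'cg.version': b'02',
#         b'phases': True,
#         b'obsolescence': True,
#         b'tagsfnodescache': True,
#         b'revbranchcache': True,
#     }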
1733
1733
1734 def addparttagsfnodescache(repo, bundler, outgoing):
1734 def addparttagsfnodescache(repo, bundler, outgoing):
1735 # we include the tags fnode cache for the bundle changeset
1735 # we include the tags fnode cache for the bundle changeset
1736 # (as an optional part)
1736 # (as an optional part)
1737 cache = tags.hgtagsfnodescache(repo.unfiltered())
1737 cache = tags.hgtagsfnodescache(repo.unfiltered())
1738 chunks = []
1738 chunks = []
1739
1739
1740 # .hgtags fnodes are only relevant for head changesets. While we could
1740 # .hgtags fnodes are only relevant for head changesets. While we could
1741 # transfer values for all known nodes, there will likely be little to
1741 # transfer values for all known nodes, there will likely be little to
1742 # no benefit.
1742 # no benefit.
1743 #
1743 #
1744 # We don't bother using a generator to produce output data because
1744 # We don't bother using a generator to produce output data because
1745 # a) we only have 40 bytes per head and even esoteric numbers of heads
1745 # a) we only have 40 bytes per head and even esoteric numbers of heads
1746 # consume little memory (1M heads is 40MB) b) we don't want to send the
1746 # consume little memory (1M heads is 40MB) b) we don't want to send the
1747 # part if we don't have entries and knowing if we have entries requires
1747 # part if we don't have entries and knowing if we have entries requires
1748 # cache lookups.
1748 # cache lookups.
1749 for node in outgoing.missingheads:
1749 for node in outgoing.missingheads:
1750 # Don't compute missing, as this may slow down serving.
1750 # Don't compute missing, as this may slow down serving.
1751 fnode = cache.getfnode(node, computemissing=False)
1751 fnode = cache.getfnode(node, computemissing=False)
1752 if fnode is not None:
1752 if fnode is not None:
1753 chunks.extend([node, fnode])
1753 chunks.extend([node, fnode])
1754
1754
1755 if chunks:
1755 if chunks:
1756 bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
1756 bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
1757
1757
1758
1758
1759 def addpartrevbranchcache(repo, bundler, outgoing):
1759 def addpartrevbranchcache(repo, bundler, outgoing):
1760 # we include the rev branch cache for the bundle changeset
1760 # we include the rev branch cache for the bundle changeset
1761 # (as an optional part)
1761 # (as an optional part)
1762 cache = repo.revbranchcache()
1762 cache = repo.revbranchcache()
1763 cl = repo.unfiltered().changelog
1763 cl = repo.unfiltered().changelog
1764 branchesdata = collections.defaultdict(lambda: (set(), set()))
1764 branchesdata = collections.defaultdict(lambda: (set(), set()))
1765 for node in outgoing.missing:
1765 for node in outgoing.missing:
1766 branch, close = cache.branchinfo(cl.rev(node))
1766 branch, close = cache.branchinfo(cl.rev(node))
1767 branchesdata[branch][close].add(node)
1767 branchesdata[branch][close].add(node)
1768
1768
1769 def generate():
1769 def generate():
1770 for branch, (nodes, closed) in sorted(branchesdata.items()):
1770 for branch, (nodes, closed) in sorted(branchesdata.items()):
1771 utf8branch = encoding.fromlocal(branch)
1771 utf8branch = encoding.fromlocal(branch)
1772 yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
1772 yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
1773 yield utf8branch
1773 yield utf8branch
1774 for n in sorted(nodes):
1774 for n in sorted(nodes):
1775 yield n
1775 yield n
1776 for n in sorted(closed):
1776 for n in sorted(closed):
1777 yield n
1777 yield n
1778
1778
1779 bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
1779 bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
1780
1780
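# Per-branch record layout emitted by generate() above (informative;
# ``rbcstruct`` is the struct defined earlier in this module):
#
#     rbcstruct(len(utf8branch), n_open, n_closed)
#     utf8branch bytes
#     n_open node ids, then n_closed node ids of close-branch changesets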
1781
1781
1782 def _formatrequirementsspec(requirements):
1782 def _formatrequirementsspec(requirements):
1783 requirements = [req for req in requirements if req != b"shared"]
1783 requirements = [req for req in requirements if req != b"shared"]
1784 return urlreq.quote(b','.join(sorted(requirements)))
1784 return urlreq.quote(b','.join(sorted(requirements)))
1785
1785
1786
1786
1787 def _formatrequirementsparams(requirements):
1787 def _formatrequirementsparams(requirements):
1788 requirements = _formatrequirementsspec(requirements)
1788 requirements = _formatrequirementsspec(requirements)
1789 params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
1789 params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
1790 return params
1790 return params
1791
1791
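# Example (informative; ',' and '=' end up URL-quoted):
#
#     _formatrequirementsparams([b'revlogv1', b'store'])
#         ->  b'requirements%3Drevlogv1%2Cstore'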
1792
1792
1793 def addpartbundlestream2(bundler, repo, **kwargs):
1793 def addpartbundlestream2(bundler, repo, **kwargs):
1794 if not kwargs.get(r'stream', False):
1794 if not kwargs.get(r'stream', False):
1795 return
1795 return
1796
1796
1797 if not streamclone.allowservergeneration(repo):
1797 if not streamclone.allowservergeneration(repo):
1798 raise error.Abort(
1798 raise error.Abort(
1799 _(
1799 _(
1800 b'stream data requested but server does not allow '
1800 b'stream data requested but server does not allow '
1801 b'this feature'
1801 b'this feature'
1802 ),
1802 ),
1803 hint=_(
1803 hint=_(
1804 b'well-behaved clients should not be '
1804 b'well-behaved clients should not be '
1805 b'requesting stream data from servers not '
1805 b'requesting stream data from servers not '
1806 b'advertising it; the client may be buggy'
1806 b'advertising it; the client may be buggy'
1807 ),
1807 ),
1808 )
1808 )
1809
1809
1810 # Stream clones don't compress well. And compression undermines a
1810 # Stream clones don't compress well. And compression undermines a
1811 # goal of stream clones, which is to be fast. Communicate the desire
1811 # goal of stream clones, which is to be fast. Communicate the desire
1812 # to avoid compression to consumers of the bundle.
1812 # to avoid compression to consumers of the bundle.
1813 bundler.prefercompressed = False
1813 bundler.prefercompressed = False
1814
1814
1815 # get the includes and excludes
1815 # get the includes and excludes
1816 includepats = kwargs.get(r'includepats')
1816 includepats = kwargs.get(r'includepats')
1817 excludepats = kwargs.get(r'excludepats')
1817 excludepats = kwargs.get(r'excludepats')
1818
1818
1819 narrowstream = repo.ui.configbool(
1819 narrowstream = repo.ui.configbool(
1820 b'experimental', b'server.stream-narrow-clones'
1820 b'experimental', b'server.stream-narrow-clones'
1821 )
1821 )
1822
1822
1823 if (includepats or excludepats) and not narrowstream:
1823 if (includepats or excludepats) and not narrowstream:
1824 raise error.Abort(_(b'server does not support narrow stream clones'))
1824 raise error.Abort(_(b'server does not support narrow stream clones'))
1825
1825
1826 includeobsmarkers = False
1826 includeobsmarkers = False
1827 if repo.obsstore:
1827 if repo.obsstore:
1828 remoteversions = obsmarkersversion(bundler.capabilities)
1828 remoteversions = obsmarkersversion(bundler.capabilities)
1829 if not remoteversions:
1829 if not remoteversions:
1830 raise error.Abort(
1830 raise error.Abort(
1831 _(
1831 _(
1832 b'server has obsolescence markers, but client '
1832 b'server has obsolescence markers, but client '
1833 b'cannot receive them via stream clone'
1833 b'cannot receive them via stream clone'
1834 )
1834 )
1835 )
1835 )
1836 elif repo.obsstore._version in remoteversions:
1836 elif repo.obsstore._version in remoteversions:
1837 includeobsmarkers = True
1837 includeobsmarkers = True
1838
1838
1839 filecount, bytecount, it = streamclone.generatev2(
1839 filecount, bytecount, it = streamclone.generatev2(
1840 repo, includepats, excludepats, includeobsmarkers
1840 repo, includepats, excludepats, includeobsmarkers
1841 )
1841 )
1842 requirements = _formatrequirementsspec(repo.requirements)
1842 requirements = _formatrequirementsspec(repo.requirements)
1843 part = bundler.newpart(b'stream2', data=it)
1843 part = bundler.newpart(b'stream2', data=it)
1844 part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
1844 part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
1845 part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
1845 part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
1846 part.addparam(b'requirements', requirements, mandatory=True)
1846 part.addparam(b'requirements', requirements, mandatory=True)
1847
1847
1848
1848
1849 def buildobsmarkerspart(bundler, markers):
1849 def buildobsmarkerspart(bundler, markers):
1850 """add an obsmarker part to the bundler with <markers>
1850 """add an obsmarker part to the bundler with <markers>
1851
1851
1852 No part is created if markers is empty.
1852 No part is created if markers is empty.
1853 Raises ValueError if the bundler doesn't support any known obsmarker format.
1853 Raises ValueError if the bundler doesn't support any known obsmarker format.
1854 """
1854 """
1855 if not markers:
1855 if not markers:
1856 return None
1856 return None
1857
1857
1858 remoteversions = obsmarkersversion(bundler.capabilities)
1858 remoteversions = obsmarkersversion(bundler.capabilities)
1859 version = obsolete.commonversion(remoteversions)
1859 version = obsolete.commonversion(remoteversions)
1860 if version is None:
1860 if version is None:
1861 raise ValueError(b'bundler does not support common obsmarker format')
1861 raise ValueError(b'bundler does not support common obsmarker format')
1862 stream = obsolete.encodemarkers(markers, True, version=version)
1862 stream = obsolete.encodemarkers(markers, True, version=version)
1863 return bundler.newpart(b'obsmarkers', data=stream)
1863 return bundler.newpart(b'obsmarkers', data=stream)
1864
1864
1865
1865
def writebundle(
    ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == b"HG20":
        bundle = bundle20(ui)
        bundle.setcompression(compression, compopts)
        part = bundle.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != b'01':
            raise error.Abort(
                _(b'old bundle types only support v1 changegroups')
            )
        header, comp = bundletypes[bundletype]
        if comp not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % comp)
        compengine = util.compengines.forbundletype(comp)

        def chunkiter():
            yield header
            for chunk in compengine.compressstream(cg.getchunks(), compopts):
                yield chunk

        chunkiter = chunkiter()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream
    return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)


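# Hypothetical usage sketch ('ui' and 'cg' are assumed to come from the
# caller). bundle2 takes a separate compression argument, while bundle1
# types such as b'HG10BZ' encode the compression in the type itself:
#
#   writebundle(ui, cg, b'backup.hg', b'HG20', compression=b'BZ')
#   writebundle(ui, cg, b'backup.hg', b'HG10BZ')

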
def combinechangegroupresults(op):
    """logic to combine 0 or more addchangegroup results into one"""
    results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result


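def _example_combine_results():
    """Worked example (illustrative only, not used by the module): how
    the loop above nets out addchangegroup return values, where 1 means
    "no change to the number of heads", n > 1 means "n - 1 heads added",
    m < -1 means "-(m + 1) heads removed", and 0 is an error."""
    results = [1, 3, -2]  # no change, two heads added, one head removed
    changedheads = 0
    for ret in results:
        if ret > 1:
            changedheads += ret - 1
        elif ret < -1:
            changedheads += ret + 1
    assert changedheads == 1  # net effect: one extra head
    return 1 + changedheads  # combinechangegroupresults would report 2

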
@parthandler(
    b'changegroup', (b'version', b'nbchanges', b'treemanifest', b'targetphase')
)
def handlechangegroup(op, inpart):
    """apply a changegroup part on the repo

    This is a very early implementation that will need massive rework
    before being inflicted on any end-user.
    """
    from . import localrepo

    tr = op.gettransaction()
    unpackerversion = inpart.params.get(b'version', b'01')
    # We should raise an appropriate exception here
    cg = changegroup.getunbundler(unpackerversion, inpart, None)
    # the source and url passed here are overwritten by the ones contained in
    # the transaction.hookargs argument. So 'bundle2' is a placeholder
    nbchangesets = None
    if b'nbchanges' in inpart.params:
        nbchangesets = int(inpart.params.get(b'nbchanges'))
    if (
        b'treemanifest' in inpart.params
        and b'treemanifest' not in op.repo.requirements
    ):
        if len(op.repo.changelog) != 0:
            raise error.Abort(
                _(
                    b"bundle contains tree manifests, but local repo is "
                    b"non-empty and does not use tree manifests"
                )
            )
        op.repo.requirements.add(b'treemanifest')
        op.repo.svfs.options = localrepo.resolvestorevfsoptions(
            op.repo.ui, op.repo.requirements, op.repo.features
        )
        op.repo._writerequirements()
    extrakwargs = {}
    targetphase = inpart.params.get(b'targetphase')
    if targetphase is not None:
        extrakwargs[r'targetphase'] = int(targetphase)
    ret = _processchangegroup(
        op,
        cg,
        tr,
        b'bundle2',
        b'bundle2',
        expectedtotal=nbchangesets,
        **extrakwargs
    )
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one needs to start somewhere.
        part = op.reply.newpart(b'reply:changegroup', mandatory=False)
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    assert not inpart.read()


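# Illustrative parameter set for a 'changegroup' part (values are made
# up), using the names declared in the @parthandler above; 'treemanifest'
# is a bare flag whose presence upgrades the repository requirements:
#
#   {b'version': b'02', b'nbchanges': b'3', b'targetphase': b'2'}

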
_remotechangegroupparams = tuple(
    [b'url', b'size', b'digests']
    + [b'digest:%s' % k for k in util.DIGESTS.keys()]
)


@parthandler(b'remote-changegroup', _remotechangegroupparams)
def handleremotechangegroup(op, inpart):
    """apply a bundle10 on the repo, given an url and validation information

    All the information about the remote bundle to import is given as
    parameters. The parameters include:
    - url: the url to the bundle10.
    - size: the bundle10 file size. It is used to validate that what was
      retrieved by the client matches the server's knowledge about the bundle.
    - digests: a space separated list of the digest types provided as
      parameters.
    - digest:<digest-type>: the hexadecimal representation of the digest with
      that name. Like the size, it is used to validate that what was retrieved
      by the client matches what the server knows about the bundle.

    When multiple digest types are given, all of them are checked.
    """
    try:
        raw_url = inpart.params[b'url']
    except KeyError:
        raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
    parsed_url = util.url(raw_url)
    if parsed_url.scheme not in capabilities[b'remote-changegroup']:
        raise error.Abort(
            _(b'remote-changegroup does not support %s urls')
            % parsed_url.scheme
        )

    try:
        size = int(inpart.params[b'size'])
    except ValueError:
        raise error.Abort(
            _(b'remote-changegroup: invalid value for param "%s"') % b'size'
        )
    except KeyError:
        raise error.Abort(
            _(b'remote-changegroup: missing "%s" param') % b'size'
        )

    digests = {}
    for typ in inpart.params.get(b'digests', b'').split():
        param = b'digest:%s' % typ
        try:
            value = inpart.params[param]
        except KeyError:
            raise error.Abort(
                _(b'remote-changegroup: missing "%s" param') % param
            )
        digests[typ] = value

    real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)

    tr = op.gettransaction()
    from . import exchange

    cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
    if not isinstance(cg, changegroup.cg1unpacker):
        raise error.Abort(
            _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
        )
    ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one needs to start somewhere.
        part = op.reply.newpart(b'reply:changegroup')
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    try:
        real_part.validate()
    except error.Abort as e:
        raise error.Abort(
            _(b'bundle at %s is corrupted:\n%s')
            % (util.hidepassword(raw_url), bytes(e))
        )
    assert not inpart.read()


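# Illustrative parameter set for a 'remote-changegroup' part (values are
# made up); each name listed under b'digests' makes the matching
# b'digest:<name>' parameter mandatory, as enforced above:
#
#   {
#       b'url': b'https://example.com/bundle.hg',
#       b'size': b'12345',
#       b'digests': b'sha1',
#       b'digest:sha1': b'da39a3ee5e6b4b0d3255bfef95601890afd80709',
#   }

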
@parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
def handlereplychangegroup(op, inpart):
    ret = int(inpart.params[b'return'])
    replyto = int(inpart.params[b'in-reply-to'])
    op.records.add(b'changegroup', {b'return': ret}, replyto)


@parthandler(b'check:bookmarks')
def handlecheckbookmarks(op, inpart):
    """check location of bookmarks

    This part is used to detect push races regarding bookmarks. It
    contains binary encoded (bookmark, node) tuples. If the local state
    does not match the one in the part, a PushRaced exception is raised.
    """
    bookdata = bookmarks.binarydecode(inpart)

    msgstandard = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" move from %s to %s)'
    )
    msgmissing = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" is missing, expected %s)'
    )
    msgexist = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" set on %s, expected missing)'
    )
    for book, node in bookdata:
        currentnode = op.repo._bookmarks.get(book)
        if currentnode != node:
            if node is None:
                finalmsg = msgexist % (book, nodemod.short(currentnode))
            elif currentnode is None:
                finalmsg = msgmissing % (book, nodemod.short(node))
            else:
                finalmsg = msgstandard % (
                    book,
                    nodemod.short(node),
                    nodemod.short(currentnode),
                )
            raise error.PushRaced(finalmsg)


@parthandler(b'check:heads')
def handlecheckheads(op, inpart):
    """check that the heads of the repo did not change

    This is used to detect a push race when using unbundle.
    This replaces the "heads" argument of unbundle."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    # Trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    if sorted(heads) != sorted(op.repo.heads()):
        raise error.PushRaced(
            b'remote repository changed while pushing - please try again'
        )


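def _example_checkheads_payload():
    """Self-contained sketch (illustrative only, not Mercurial code) of
    the payload format read by 'check:heads' above and 'check:updated-heads'
    below: a bare concatenation of 20-byte binary nodes, with no count or
    delimiter."""
    import io

    payload = io.BytesIO(b'\x11' * 20 + b'\x22' * 20)  # two fake heads
    heads = []
    h = payload.read(20)
    while len(h) == 20:
        heads.append(h)
        h = payload.read(20)
    assert not h  # a partial trailing node would mean a truncated part
    assert len(heads) == 2
    return heads

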
@parthandler(b'check:updated-heads')
def handlecheckupdatedheads(op, inpart):
    """check for races on the heads touched by a push

    This is similar to 'check:heads' but focuses on the heads actually
    updated during the push. If other activities happen on unrelated heads,
    they are ignored.

    This allows servers with high traffic to avoid push contention as long
    as only unrelated parts of the graph are involved."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    # trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()

    currentheads = set()
    for ls in op.repo.branchmap().iterheads():
        currentheads.update(ls)

    for h in heads:
        if h not in currentheads:
            raise error.PushRaced(
                b'remote repository changed while pushing - please try again'
            )


@parthandler(b'check:phases')
def handlecheckphases(op, inpart):
    """check that phase boundaries of the repository did not change

    This is used to detect a push race.
    """
    phasetonodes = phases.binarydecode(inpart)
    unfi = op.repo.unfiltered()
    cl = unfi.changelog
    phasecache = unfi._phasecache
    msg = (
        b'remote repository changed while pushing - please try again '
        b'(%s is %s expected %s)'
    )
    for expectedphase, nodes in enumerate(phasetonodes):
        for n in nodes:
            actualphase = phasecache.phase(unfi, cl.rev(n))
            if actualphase != expectedphase:
                finalmsg = msg % (
                    nodemod.short(n),
                    phases.phasenames[actualphase],
                    phases.phasenames[expectedphase],
                )
                raise error.PushRaced(finalmsg)


@parthandler(b'output')
def handleoutput(op, inpart):
    """forward output captured on the server to the client"""
    for line in inpart.read().splitlines():
        op.ui.status(_(b'remote: %s\n') % line)


@parthandler(b'replycaps')
def handlereplycaps(op, inpart):
    """Notify that a reply bundle should be created

    The payload contains the capabilities information for the reply"""
    caps = decodecaps(inpart.read())
    if op.reply is None:
        op.reply = bundle20(op.ui, caps)


class AbortFromPart(error.Abort):
    """Sub-class of Abort that denotes an error from a bundle2 part."""


@parthandler(b'error:abort', (b'message', b'hint'))
def handleerrorabort(op, inpart):
    """Used to transmit an abort error over the wire"""
    raise AbortFromPart(
        inpart.params[b'message'], hint=inpart.params.get(b'hint')
    )


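# Hedged sketch of the sending side (using only the newpart/addparam calls
# seen elsewhere in this file; the message text is made up): a server
# failure travels as a mandatory 'error:abort' part and is re-raised
# client-side by the handler above.
#
#   part = bundler.newpart(b'error:abort')
#   part.addparam(b'message', b'push rejected by server policy')
#   part.addparam(b'hint', b'contact the repository administrator')

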
@parthandler(
    b'error:pushkey',
    (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
)
def handleerrorpushkey(op, inpart):
    """Used to transmit failure of a mandatory pushkey over the wire"""
    kwargs = {}
    for name in (b'namespace', b'key', b'new', b'old', b'ret'):
        value = inpart.params.get(name)
        if value is not None:
            kwargs[name] = value
    raise error.PushkeyFailed(
        inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
    )


@parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
def handleerrorunsupportedcontent(op, inpart):
    """Used to transmit unknown content error over the wire"""
    kwargs = {}
    parttype = inpart.params.get(b'parttype')
    if parttype is not None:
        kwargs[b'parttype'] = parttype
    params = inpart.params.get(b'params')
    if params is not None:
        kwargs[b'params'] = params.split(b'\0')

    raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))


@parthandler(b'error:pushraced', (b'message',))
def handleerrorpushraced(op, inpart):
    """Used to transmit push race error over the wire"""
    raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])


@parthandler(b'listkeys', (b'namespace',))
def handlelistkeys(op, inpart):
    """retrieve pushkey namespace content stored in a bundle2"""
    namespace = inpart.params[b'namespace']
    r = pushkey.decodekeys(inpart.read())
    op.records.add(b'listkeys', (namespace, r))


@parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
def handlepushkey(op, inpart):
    """process a pushkey request"""
    dec = pushkey.decode
    namespace = dec(inpart.params[b'namespace'])
    key = dec(inpart.params[b'key'])
    old = dec(inpart.params[b'old'])
    new = dec(inpart.params[b'new'])
    # Grab the transaction to ensure that we have the lock before performing
    # the pushkey.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    ret = op.repo.pushkey(namespace, key, old, new)
    record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
    op.records.add(b'pushkey', record)
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:pushkey')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'return', b'%i' % ret, mandatory=False)
    if inpart.mandatory and not ret:
        kwargs = {}
        for key in (b'namespace', b'key', b'new', b'old', b'ret'):
            if key in inpart.params:
                kwargs[key] = inpart.params[key]
        raise error.PushkeyFailed(
            partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
        )


@parthandler(b'bookmarks')
def handlebookmark(op, inpart):
    """transmit bookmark information

    The part contains binary encoded bookmark information.

    The exact behavior of this part can be controlled by the 'bookmarks' mode
    on the bundle operation.

    When mode is 'apply' (the default) the bookmark information is applied as
    is to the unbundling repository. Make sure a 'check:bookmarks' part is
    issued earlier to check for push races in such an update. This behavior
    is suitable for pushing.

    When mode is 'records', the information is recorded into the 'bookmarks'
    records of the bundle operation. This behavior is suitable for pulling.
    """
    changes = bookmarks.binarydecode(inpart)

    pushkeycompat = op.repo.ui.configbool(
        b'server', b'bookmarks-pushkey-compat'
    )
    bookmarksmode = op.modes.get(b'bookmarks', b'apply')

    if bookmarksmode == b'apply':
        tr = op.gettransaction()
        bookstore = op.repo._bookmarks
        if pushkeycompat:
            allhooks = []
            for book, node in changes:
                hookargs = tr.hookargs.copy()
                hookargs[b'pushkeycompat'] = b'1'
                hookargs[b'namespace'] = b'bookmarks'
                hookargs[b'key'] = book
                hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
                hookargs[b'new'] = nodemod.hex(
                    node if node is not None else b''
                )
                allhooks.append(hookargs)

            for hookargs in allhooks:
                op.repo.hook(
                    b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
                )

        bookstore.applychanges(op.repo, op.gettransaction(), changes)

        if pushkeycompat:

            def runhook():
                for hookargs in allhooks:
                    op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))

            op.repo._afterlock(runhook)

    elif bookmarksmode == b'records':
        for book, node in changes:
            record = {b'bookmark': book, b'node': node}
            op.records.add(b'bookmarks', record)
    else:
        raise error.ProgrammingError(
            b'unknown bookmark mode: %s' % bookmarksmode
        )


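# Illustrative follow-up (names taken from this file): in b'records' mode
# nothing is written to the repository, so a puller can inspect the
# decoded changes afterwards, e.g.
#
#   for record in op.records[b'bookmarks']:
#       book, node = record[b'bookmark'], record[b'node']

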
@parthandler(b'phase-heads')
def handlephases(op, inpart):
    """apply phases from bundle part to repo"""
    headsbyphase = phases.binarydecode(inpart)
    phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)


@parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
def handlepushkeyreply(op, inpart):
    """retrieve the result of a pushkey request"""
    ret = int(inpart.params[b'return'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'pushkey', {b'return': ret}, partid)


@parthandler(b'obsmarkers')
def handleobsmarker(op, inpart):
    """add a stream of obsmarkers to the repo"""
    tr = op.gettransaction()
    markerdata = inpart.read()
    if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
        op.ui.writenoi18n(
            b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
        )
    # The mergemarkers call will crash if marker creation is not enabled.
    # we want to avoid this if the part is advisory.
    if not inpart.mandatory and op.repo.obsstore.readonly:
        op.repo.ui.debug(
            b'ignoring obsolescence markers, feature not enabled\n'
        )
        return
    new = op.repo.obsstore.mergemarkers(tr, markerdata)
    op.repo.invalidatevolatilesets()
    op.records.add(b'obsmarkers', {b'new': new})
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:obsmarkers')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'new', b'%i' % new, mandatory=False)


@parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
def handleobsmarkerreply(op, inpart):
    """retrieve the result of an obsmarkers request"""
    ret = int(inpart.params[b'new'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'obsmarkers', {b'new': ret}, partid)


@parthandler(b'hgtagsfnodes')
def handlehgtagsfnodes(op, inpart):
    """Applies .hgtags fnodes cache entries to the local repo.

    Payload is pairs of 20 byte changeset nodes and filenodes.
    """
    # Grab the transaction so we ensure that we have the lock at this point.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    cache = tags.hgtagsfnodescache(op.repo.unfiltered())

    count = 0
    while True:
        node = inpart.read(20)
        fnode = inpart.read(20)
        if len(node) < 20 or len(fnode) < 20:
            op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
            break
        cache.setfnode(node, fnode)
        count += 1

    cache.write()
    op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)


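def _example_hgtagsfnodes_payload():
    """Sketch (illustrative only, fake node values) of the payload consumed
    above: a flat run of (changeset node, .hgtags filenode) pairs, 40 bytes
    per pair, read until the stream runs dry."""
    pairs = [(b'\x01' * 20, b'\x02' * 20), (b'\x03' * 20, b'\x04' * 20)]
    payload = b''.join(node + fnode for node, fnode in pairs)
    assert len(payload) == 40 * len(pairs)
    return payload

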
rbcstruct = struct.Struct(b'>III')


@parthandler(b'cache:rev-branch-cache')
def handlerbc(op, inpart):
    """receive a rev-branch-cache payload and update the local cache

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    total = 0
    rawheader = inpart.read(rbcstruct.size)
    cache = op.repo.revbranchcache()
    cl = op.repo.unfiltered().changelog
    while rawheader:
        header = rbcstruct.unpack(rawheader)
        total += header[1] + header[2]
        utf8branch = inpart.read(header[0])
        branch = encoding.tolocal(utf8branch)
        for x in pycompat.xrange(header[1]):
            node = inpart.read(20)
            rev = cl.rev(node)
            cache.setdata(branch, rev, node, False)
        for x in pycompat.xrange(header[2]):
            node = inpart.read(20)
            rev = cl.rev(node)
            cache.setdata(branch, rev, node, True)
        rawheader = inpart.read(rbcstruct.size)
    cache.write()


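def _example_rbc_entry():
    """Hedged sketch (not part of Mercurial, fake node value) of how a
    sender could build one branch entry of the payload decoded above,
    reusing the rbcstruct header defined before handlerbc: (branch name
    length, open head count, closed head count), then the UTF-8 branch
    name, then the 20-byte nodes, open heads first."""
    utf8branch = b'default'
    openheads = [b'\x0a' * 20]
    closedheads = []
    entry = rbcstruct.pack(len(utf8branch), len(openheads), len(closedheads))
    entry += utf8branch + b''.join(openheads) + b''.join(closedheads)
    return entry

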
@parthandler(b'pushvars')
def bundle2getvars(op, part):
    '''unbundle a bundle2 containing shellvars on the server'''
    # An option to disable unbundling on server-side for security reasons
    if op.ui.configbool(b'push', b'pushvars.server'):
        hookargs = {}
        for key, value in part.advisoryparams:
            key = key.upper()
            # We want pushed variables to have USERVAR_ prepended so we know
            # they came from the --pushvars flag.
            key = b"USERVAR_" + key
            hookargs[key] = value
        op.addhookargs(hookargs)


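# Usage note (flag and prefixes as described above): a client running
# `hg push --pushvars DEBUG=1` surfaces server-side as the hook argument
# USERVAR_DEBUG=1 (exported to shell hooks in the environment as
# HG_USERVAR_DEBUG), provided push.pushvars.server is enabled there.

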
@parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
def handlestreamv2bundle(op, part):

    requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
    filecount = int(part.params[b'filecount'])
    bytecount = int(part.params[b'bytecount'])

    repo = op.repo
    if len(repo):
        msg = _(b'cannot apply stream clone to non empty repository')
        raise error.Abort(msg)

    repo.ui.debug(b'applying stream bundle\n')
    streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)


def widen_bundle(
    bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
):
    """generates bundle2 for widening a narrow clone

    bundler is the bundle to which data should be added
    repo is the localrepository instance
    oldmatcher matches what the client already has
    newmatcher matches what the client needs (including what it already has)
    common is the set of common heads between server and client
    known is a set of revs known on the client side (used in ellipses)
    cgversion is the changegroup version to send
    ellipses is a boolean telling whether to send ellipsis data or not

    returns the bundler with the data required for widening added
    """
    commonnodes = set()
    cl = repo.changelog
    for r in repo.revs(b"::%ln", common):
        commonnodes.add(cl.node(r))
    if commonnodes:
        # XXX: we should only send the filelogs (and treemanifest). user
        # already has the changelog and manifest
        packer = changegroup.getbundler(
            cgversion,
            repo,
            oldmatcher=oldmatcher,
            matcher=newmatcher,
            fullnodes=commonnodes,
        )
        cgdata = packer.generate(
            {nodemod.nullid},
            list(commonnodes),
            False,
            b'narrow_widen',
            changelog=False,
        )

        part = bundler.newpart(b'changegroup', data=cgdata)
        part.addparam(b'version', cgversion)
        if b'treemanifest' in repo.requirements:
            part.addparam(b'treemanifest', b'1')

    return bundler
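

# Hypothetical call sketch (argument values are assumptions; b'02' is a
# valid changegroup version accepted by changegroup.getbundler):
#
#   bundler = widen_bundle(
#       bundler, repo, oldmatcher, newmatcher, common, known, b'02', False
#   )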