##// END OF EJS Templates
debugindexstats: handle the lack of Rust support better...
Raphaël Gomès -
r52125:3551f2a1 default
parent child Browse files
Show More
@@ -1,4751 +1,4751 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import binascii
9 import binascii
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import difflib
13 import difflib
14 import errno
14 import errno
15 import glob
15 import glob
16 import operator
16 import operator
17 import os
17 import os
18 import platform
18 import platform
19 import random
19 import random
20 import re
20 import re
21 import socket
21 import socket
22 import ssl
22 import ssl
23 import stat
23 import stat
24 import subprocess
24 import subprocess
25 import sys
25 import sys
26 import time
26 import time
27
27
28 from .i18n import _
28 from .i18n import _
29 from .node import (
29 from .node import (
30 bin,
30 bin,
31 hex,
31 hex,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from .pycompat import (
35 from .pycompat import (
36 open,
36 open,
37 )
37 )
38 from . import (
38 from . import (
39 bundle2,
39 bundle2,
40 bundlerepo,
40 bundlerepo,
41 changegroup,
41 changegroup,
42 cmdutil,
42 cmdutil,
43 color,
43 color,
44 context,
44 context,
45 copies,
45 copies,
46 dagparser,
46 dagparser,
47 dirstateutils,
47 dirstateutils,
48 encoding,
48 encoding,
49 error,
49 error,
50 exchange,
50 exchange,
51 extensions,
51 extensions,
52 filelog,
52 filelog,
53 filemerge,
53 filemerge,
54 filesetlang,
54 filesetlang,
55 formatter,
55 formatter,
56 hg,
56 hg,
57 httppeer,
57 httppeer,
58 localrepo,
58 localrepo,
59 lock as lockmod,
59 lock as lockmod,
60 logcmdutil,
60 logcmdutil,
61 manifest,
61 manifest,
62 mergestate as mergestatemod,
62 mergestate as mergestatemod,
63 metadata,
63 metadata,
64 obsolete,
64 obsolete,
65 obsutil,
65 obsutil,
66 pathutil,
66 pathutil,
67 phases,
67 phases,
68 policy,
68 policy,
69 pvec,
69 pvec,
70 pycompat,
70 pycompat,
71 registrar,
71 registrar,
72 repair,
72 repair,
73 repoview,
73 repoview,
74 requirements,
74 requirements,
75 revlog,
75 revlog,
76 revset,
76 revset,
77 revsetlang,
77 revsetlang,
78 scmutil,
78 scmutil,
79 setdiscovery,
79 setdiscovery,
80 simplemerge,
80 simplemerge,
81 sshpeer,
81 sshpeer,
82 sslutil,
82 sslutil,
83 streamclone,
83 streamclone,
84 strip,
84 strip,
85 tags as tagsmod,
85 tags as tagsmod,
86 templater,
86 templater,
87 treediscovery,
87 treediscovery,
88 upgrade,
88 upgrade,
89 url as urlmod,
89 url as urlmod,
90 util,
90 util,
91 verify,
91 verify,
92 vfs as vfsmod,
92 vfs as vfsmod,
93 wireprotoframing,
93 wireprotoframing,
94 wireprotoserver,
94 wireprotoserver,
95 )
95 )
96 from .interfaces import repository
96 from .interfaces import repository
97 from .stabletailgraph import stabletailsort
97 from .stabletailgraph import stabletailsort
98 from .utils import (
98 from .utils import (
99 cborutil,
99 cborutil,
100 compression,
100 compression,
101 dateutil,
101 dateutil,
102 procutil,
102 procutil,
103 stringutil,
103 stringutil,
104 urlutil,
104 urlutil,
105 )
105 )
106
106
107 from .revlogutils import (
107 from .revlogutils import (
108 debug as revlog_debug,
108 debug as revlog_debug,
109 nodemap,
109 nodemap,
110 rewrite,
110 rewrite,
111 sidedata,
111 sidedata,
112 )
112 )
113
113
# Convenience alias for lockmod.release.
release = lockmod.release

# Command table for all debug* commands in this module.  It is seeded
# with the commands declared by the strip module so they are registered
# alongside the ones added below via the ``command`` decorator.
table = {}
table.update(strip.command._table)
command = registrar.command(table)
119
119
120
120
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    # Three arguments name an explicit revlog index file; two arguments
    # resolve the revisions against the current repository's changelog.
    nargs = len(args)
    if nargs == 3:
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        resolve = rlog.lookup
    elif nargs == 2:
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        rlog = repo.changelog
        resolve = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    anc = rlog.ancestor(resolve(rev1), resolve(rev2))
    ui.write(b'%d:%s\n' % (rlog.rev(anc), hex(anc)))
140
140
141
141
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # Write the standard EICAR test pattern into the repository cache
    # area, where an on-access antivirus scanner should notice it.
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    # NOTE(review): presumably an active scanner quarantines/removes the
    # file, so this unlink then fails loudly and signals the scanner's
    # presence -- confirm against util.unlink semantics.
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))
157
157
158
158
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # Open the bundle path (local file or URL), parse its header, and
    # apply the contained stream data to the local repository.
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
165
165
166
166
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
        (
            b'',
            b'from-existing',
            None,
            _(b'continue from a non-empty repository'),
        ),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
    from_existing=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    # Building into a non-empty repo is refused unless --from-existing
    # was explicitly requested.
    cl = repo.changelog
    if len(cl) > 0 and not from_existing:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG (first parse pass, used to size
    # the progress bar and the initial mergeable-file content)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [b'%d' % i for i in range(0, total * linesperrev)]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1  # id of the last node committed, -1 before the first one
        atbranch = b'default'  # branch name applied to subsequent nodes
        nodeids = []  # node ids indexed by DAG id, for backref resolution
        id = 0
        progress.update(id)
        # Second parse pass: actually create commits, tags and branch
        # switches in the order the DAG text describes them.
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    # Maintain a single file "mf" whose lines merge
                    # cleanly: merges three-way-merge the parents'
                    # copies, linear commits append to the parent copy.
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [
                            l.strip()
                            for l in simplemerge.render_minimized(m3)[0]
                        ]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # Tag this revision's slot in the file so every rev
                    # modifies a distinct line.
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # File "of" is fully rewritten by every revision.
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    # One brand-new file per revision; merges also carry
                    # over the second parent's nf* files unchanged.
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve content from the dict built
                    # above; None means "file absent in this revision".
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                # Resolve DAG parent ids to previously committed nodes;
                # absent or negative ids mean the null parent.
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                # Local tag element: remember it, written out at the end.
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                # Branch switch for all subsequently created nodes.
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
350
350
351
351
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    # Dump the contents of a changegroup stream for debugging, indented
    # by ``indent`` spaces (non-zero when nested inside a bundle2 part
    # listing).  With ``all`` every delta of every section is printed;
    # otherwise only the changelog node ids are listed.
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # Print a section header then one line per delta in the
            # current section of the stream.
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        # Stream order: changelog, manifests, then one section per file
        # until filelogheader() returns an empty dict (the terminator).
        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
391
391
392
392
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    # Decode the raw obsolescence-marker payload of a bundle2 part and
    # print one line per marker, prefixed by ``indent`` spaces.
    payload = part.read()
    prefix = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(payload)
    except error.UnknownVersion as exc:
        ui.write(
            b"%sunsupported version: %s (%d bytes)\n"
            % (prefix, exc.version, len(payload))
        )
    else:
        ui.write(
            b"%sversion: %d (%d bytes)\n" % (prefix, version, len(payload))
        )
        fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
        for raw in sorted(markers):
            fm.startitem()
            fm.plain(prefix)
            cmdutil.showmarker(fm, obsutil.marker(None, raw))
        fm.end()
414
414
415
415
def _debugphaseheads(ui, data, indent=0):
    """display phase heads contained in 'data'"""
    # Fix: the previous docstring ("display version and markers") was
    # copy-pasted from _debugobsmarkers and described the wrong payload.
    #
    # ``data`` is the binary encoding used by the 'phase-heads' bundle2
    # part; decode it into per-phase head lists and print one
    # "<hex node> <phase name>" line per head, indented by ``indent``.
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
424
424
425
425
def _quasirepr(thing):
    """Return a deterministic bytes rendering of ``thing``.

    Mappings are rendered with their keys sorted so the output is stable
    across runs; any other value falls back to its ``repr``.
    """
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        pairs = (b'%s: %s' % (key, thing[key]) for key in sorted(thing))
        return b'{%s}' % b', '.join(pairs)
    return pycompat.bytestr(repr(thing))
432
432
433
433
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    # Optional filter: only show parts whose type was named via
    # --part-type (may be given several times).
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # Well-known part payloads get a detailed dump, indented four
        # spaces, unless the user asked for --quiet.
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
456
456
457
457
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as fh:
        # With --spec we only report the bundle's specification string.
        if spec:
            ui.write(b'%s\n' % exchange.getbundlespec(ui, fh))
            return

        # Dispatch on the detected bundle format.
        bundle = exchange.readbundle(ui, fh, bundlepath)
        if isinstance(bundle, bundle2.unbundle20):
            return _debugbundle2(ui, bundle, all=all, **opts)
        _debugchangegroup(ui, bundle, all=all, **opts)
480
480
481
481
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    # Connect to the peer, print its advertised capabilities (plain and
    # bundle2), and always close the connection afterwards.
    peer = hg.peer(ui, pycompat.byteskwargs(opts), path)
    try:
        caps = peer.capabilities()
        ui.writenoi18n(b'Main capabilities:\n')
        for cap in sorted(caps):
            ui.write(b' %s\n' % cap)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(b2caps.items()):
                ui.write(b' %s\n' % key)
                for value in values:
                    ui.write(b' %s\n' % value)
    finally:
        peer.close()
500
500
501
501
@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = logcmdutil.revsingle(repo, rev, None)
    files = None

    if opts['compute']:
        # Recompute the file-change information from the revision itself.
        files = metadata.compute_all_files_changes(ctx)
    else:
        # Read the pre-computed information from changelog sidedata, if
        # this revision carries a files block at all.
        sd = repo.changelog.sidedata(ctx.rev())
        if sd.get(sidedata.SD_FILES) is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is not None:
        template = b"%-8s %2s: %s, %s;\n"
        for fname in sorted(files.touched):
            # Pick the most specific action that applies to this file;
            # plain "touched" is the catch-all.
            for members, label in (
                (files.added, b"added"),
                (files.removed, b"removed"),
                (files.merged, b"merged"),
                (files.salvaged, b"salvaged"),
            ):
                if fname in members:
                    action = label
                    break
            else:
                action = b"touched"

            # Copy tracing: record which parent the file was copied
            # from, if any, and the source path on that parent.
            copy_parent = b""
            copy_source = b""
            if fname in files.copied_from_p1:
                copy_parent = b"p1"
                copy_source = files.copied_from_p1[fname]
            elif fname in files.copied_from_p2:
                copy_parent = b"p2"
                copy_source = files.copied_from_p2[fname]

            ui.write(template % (action, copy_parent, fname, copy_source))
551
551
552
552
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    # Delegate the actual consistency check to the verifier; any
    # reported problem aborts the command.
    dirstate_errors = verify.verifier(repo)._verify_dirstate()
    if dirstate_errors:
        raise error.Abort(
            _(b"dirstate inconsistent with current parent's manifest")
        )
560
560
561
561
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    # Always report the active color mode, then show either the style
    # table (--style) or the raw color/effect list.
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
574
574
575
575
def _debugdisplaycolor(ui):
    """Print every color/effect label known to the current color mode."""
    # Operate on a private copy: the style table is rewritten so that
    # each label renders as itself, without disturbing the caller's ui.
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # terminfo mode may define extra color.* / terminfo.* entries.
        for key, value in ui.configitems(b'color'):
            if key.startswith(b'color.'):
                ui._styles[key] = key[6:]
            elif key.startswith(b'terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_(b'available colors:\n'))

    def sortkey(item):
        # Sort labels containing '_' after the rest so the
        # '_background' variants end up grouped together.
        return (b'_' in item[0], item[0], item[1])

    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(b'%s\n' % colorname, label=label)
592
592
593
593
def _debugdisplaystyle(ui):
    """Print each configured style label and the effects it expands to."""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    # Column width so the effect lists line up after the longest label.
    width = max(len(name) for name in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            rendered = (ui.label(effect, effect) for effect in effects.split())
            ui.write(b', '.join(rendered))
        ui.write(b'\n')
607
607
608
608
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.

    This command creates a "version 1" stream clone, which is deprecated in
    favor of newer versions of the stream protocol. Bundles using such newer
    versions can be generated using the `hg bundle` command.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        # Warn only: v1 stream bundles carry everything, phases included.
        warning = _(
            b'(warning: stream clone bundle will contain secret '
            b'revisions)\n'
        )
        ui.warn(warning)

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    formatted = b', '.join(sorted(requirements))
    ui.write(_(b'bundle requirements: %s\n') % formatted)
634
634
635
635
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # Explicit revlog index file: emit its DAG, labeling any revision
        # numbers the user listed on the command line as "rN".
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        wanted = {int(r) for r in revs}

        def events():
            for r in rlog:
                parents = [p for p in rlog.parentrevs(r) if p != -1]
                yield b'n', (r, parents)
                if r in wanted:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # Map each tagged revision to the list of its tag names.
            labels = {}
            for name, node in repo.tags().items():
                labels.setdefault(cl.rev(node), []).append(name)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        # Annotate whenever the branch changes.
                        yield b'a', newb
                        b = newb
                parents = [p for p in cl.parentrevs(r) if p != -1]
                yield b'n', (r, parents)
                if tags:
                    for name in labels.get(r) or ():
                        yield b'l', (r, name)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
705
705
706
706
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    # With -c/-m/--dir the first positional argument is the revision itself,
    # so shuffle the arguments around accordingly.
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.InputError(
                _(b'cannot specify a revision with other arguments')
            )
        file_, rev = None, file_
    elif rev is None:
        raise error.InputError(_(b'please specify a revision'))
    storage = cmdutil.openstorage(
        repo, b'debugdata', file_, pycompat.byteskwargs(opts)
    )
    try:
        ui.write(storage.rawdata(storage.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
725
725
726
726
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    # parsed is a (unixtime, tz-offset) pair.
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matcher(parsed[0]))
745
745
746
746
@command(
    b'debugdeltachain',
    [
        (
            b'r',
            b'rev',
            [],
            _('restrict processing to these revlog revisions'),
        ),
        (
            b'',
            b'all-info',
            False,
            _('compute all information unless specified otherwise'),
        ),
        (
            b'',
            b'size-info',
            None,
            _('compute information related to deltas size'),
        ),
        (
            b'',
            b'dist-info',
            None,
            _('compute information related to base distance'),
        ),
        (
            b'',
            b'sparse-info',
            None,
            _('compute information related to sparse read'),
        ),
    ]
    + cmdutil.debugrevlogopts
    + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``p1``: parent 1 revision number (for reference)
    :``p2``: parent 2 revision number (for reference)

    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision

    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
                    - base: a full snapshot
                    - snap: an intermediate snapshot
                    - p1: a delta against the first parent
                    - p2: a delta against the second parent
                    - skip1: a delta against the same base as p1
                      (when p1 has empty delta
                    - skip2: a delta against the same base as p2
                      (when p2 has empty delta
                    - prev: a delta against the previous revision
                    - other: a delta against an arbitrary revision

    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)

    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    It is possible to select the information to be computed, this can provide a
    noticeable speedup to the command in some cases.

    Always computed:

    - ``rev``
    - ``p1``
    - ``p2``
    - ``chainid``
    - ``chainlen``
    - ``prevrev``
    - ``deltatype``

    Computed with --no-size-info

    - ``compsize``
    - ``uncompsize``
    - ``chainsize``
    - ``chainratio``

    Computed with --no-dist-info

    - ``lindist``
    - ``extradist``
    - ``extraratio``

    Skipped with --no-sparse-info

    - ``readsize``
    - ``largestblock``
    - ``readdensity``
    - ``srchunks``

    --

    The sparse read can be enabled with experimental.sparse-read = True
    """
    revs_opt = opts.pop('rev', [])
    revs = [int(r) for r in revs_opt] if revs_opt else None

    # Each information category defaults to the value of --all-info unless
    # it was set explicitly on the command line.
    all_info = opts.pop('all_info', False)
    size_info = opts.pop('size_info', None)
    dist_info = opts.pop('dist_info', None)
    sparse_info = opts.pop('sparse_info', None)
    if size_info is None:
        size_info = all_info
    if dist_info is None:
        dist_info = all_info
    if sparse_info is None:
        sparse_info = all_info

    revlog = cmdutil.openrevlog(
        repo, b'debugdeltachain', file_, pycompat.byteskwargs(opts)
    )
    fm = ui.formatter(b'debugdeltachain', pycompat.byteskwargs(opts))

    lines = revlog_debug.debug_delta_chain(
        revlog,
        revs=revs,
        size_info=size_info,
        dist_info=dist_info,
        sparse_info=sparse_info,
    )
    # The generator yields a preformatted header first, then one entry per
    # revision as a sequence of (label, format, key, value) tuples.
    fm.plain(next(lines))
    for entry in lines:
        labels = b' '.join(e[0] for e in entry)
        formats = b' '.join(e[1] for e in entry)
        fm.startitem()
        fm.write(
            labels,
            formats,
            *[e[3] for e in entry],
            **{e[2]: e[3] for e in entry}
        )
        fm.plain(b'\n')
    fm.end()
912
912
913
913
@command(
    b'debug-delta-find',
    cmdutil.debugrevlogopts
    + cmdutil.formatteropts
    + [
        (
            b'',
            b'source',
            b'full',
            _(b'input data feed to the process (full, storage, p1, p2, prev)'),
        ),
    ],
    _(b'-c|-m|FILE REV'),
    optionalrepo=True,
)
def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
    """display the computation to get to a valid delta for storing REV

    This command will replay the process used to find the "best" delta to store
    a revision and display information about all the steps used to get to that
    result.

    By default, the process is fed with the full-text for the revision. This
    can be controlled with the --source flag.

    The revision use the revision number of the target storage (not changelog
    revision number).

    note: the process is initiated from a full text of the revision to store.
    """
    # With a single positional argument, that argument is the revision.
    if arg_2 is None:
        file_, rev = None, arg_1
    else:
        file_, rev = arg_1, arg_2

    rev = int(rev)

    # NOTE(review): the command name passed to openrevlog is
    # b'debugdeltachain' in the original code; kept as-is (only used for
    # error reporting) — confirm whether it should say b'debug-delta-find'.
    revlog = cmdutil.openrevlog(
        repo, b'debugdeltachain', file_, pycompat.byteskwargs(opts)
    )
    p1r, p2r = revlog.parentrevs(rev)

    # Map each --source value to a lazy computation of the delta base; the
    # lambdas keep revlog.deltaparent() from running unless requested.
    base_rev_sources = {
        b'full': lambda: nullrev,
        b'storage': lambda: revlog.deltaparent(rev),
        b'p1': lambda: p1r,
        b'p2': lambda: p2r,
        b'prev': lambda: rev - 1,
    }
    get_base = base_rev_sources.get(source)
    if get_base is None:
        raise error.InputError(b"invalid --source value: %s" % source)
    base_rev = get_base()

    revlog_debug.debug_delta_find(ui, revlog, rev, base_rev=base_rev)
972
972
973
973
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'docket',
            False,
            _(b'display the docket (metadata file) instead'),
        ),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    if opts.get("docket"):
        # --docket: dump the dirstate-v2 metadata file instead of entries.
        if not repo.dirstate._use_dirstate_v2:
            raise error.Abort(_(b'dirstate v1 does not have a docket'))

        docket = repo.dirstate._map.docket
        (
            start_offset,
            root_nodes,
            nodes_with_entry,
            nodes_with_copy,
            unused_bytes,
            _unused,
            ignore_pattern,
        ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)

        ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
        ui.write(_(b"data file uuid: %s\n") % docket.uuid)
        ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
        ui.write(_(b"number of root nodes: %d\n") % root_nodes)
        ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
        ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
        ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
        ui.write(
            _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
        )
        return

    # --nodates (deprecated) overrides --dates.
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:

        def sortkey(entry):
            # order by saved mtime, breaking ties on filename
            filename, _state, _mode, _size, mtime = entry
            return (mtime, filename)

    else:
        sortkey = None  # natural tuple order, i.e. by filename

    entries = sorted(
        repo.dirstate._map.debug_iter(all=opts['all']), key=sortkey
    )
    for filename, state, mode, size, mtime in entries:
        if mtime == -1:
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
            timestr = encoding.strtolocal(timestr)
        if mode & 0o20000:
            modestr = b'lnk'
        else:
            modestr = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, modestr, size, timestr, filename))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
1061
1061
1062
1062
@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    # Only dirstate-v2 records the ignore-pattern hash (in the docket's
    # tree metadata); with dirstate-v1 this command prints nothing.
    if repo.dirstate._use_dirstate_v2:
        docket = repo.dirstate._map.docket
        hash_len = 20  # 160 bits for SHA-1
        hash_bytes = docket.tree_metadata[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')
1077
1077
1078
1078
1079 @command(
1079 @command(
1080 b'debugdiscovery',
1080 b'debugdiscovery',
1081 [
1081 [
1082 (b'', b'old', None, _(b'use old-style discovery')),
1082 (b'', b'old', None, _(b'use old-style discovery')),
1083 (
1083 (
1084 b'',
1084 b'',
1085 b'nonheads',
1085 b'nonheads',
1086 None,
1086 None,
1087 _(b'use old-style discovery with non-heads included'),
1087 _(b'use old-style discovery with non-heads included'),
1088 ),
1088 ),
1089 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1089 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1090 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
1090 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
1091 (
1091 (
1092 b'',
1092 b'',
1093 b'local-as-revs',
1093 b'local-as-revs',
1094 b"",
1094 b"",
1095 b'treat local has having these revisions only',
1095 b'treat local has having these revisions only',
1096 ),
1096 ),
1097 (
1097 (
1098 b'',
1098 b'',
1099 b'remote-as-revs',
1099 b'remote-as-revs',
1100 b"",
1100 b"",
1101 b'use local as remote, with only these revisions',
1101 b'use local as remote, with only these revisions',
1102 ),
1102 ),
1103 ]
1103 ]
1104 + cmdutil.remoteopts
1104 + cmdutil.remoteopts
1105 + cmdutil.formatteropts,
1105 + cmdutil.formatteropts,
1106 _(b'[--rev REV] [OTHER]'),
1106 _(b'[--rev REV] [OTHER]'),
1107 )
1107 )
1108 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1108 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1109 """runs the changeset discovery protocol in isolation
1109 """runs the changeset discovery protocol in isolation
1110
1110
1111 The local peer can be "replaced" by a subset of the local repository by
1111 The local peer can be "replaced" by a subset of the local repository by
1112 using the `--local-as-revs` flag. In the same way, the usual `remote` peer
1112 using the `--local-as-revs` flag. In the same way, the usual `remote` peer
1113 can be "replaced" by a subset of the local repository using the
1113 can be "replaced" by a subset of the local repository using the
1114 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1114 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1115 discovery situations.
1115 discovery situations.
1116
1116
1117 The following developer oriented config are relevant for people playing with this command:
1117 The following developer oriented config are relevant for people playing with this command:
1118
1118
1119 * devel.discovery.exchange-heads=True
1119 * devel.discovery.exchange-heads=True
1120
1120
1121 If False, the discovery will not start with
1121 If False, the discovery will not start with
1122 remote head fetching and local head querying.
1122 remote head fetching and local head querying.
1123
1123
1124 * devel.discovery.grow-sample=True
1124 * devel.discovery.grow-sample=True
1125
1125
1126 If False, the sample size used in set discovery will not be increased
1126 If False, the sample size used in set discovery will not be increased
1127 through the process
1127 through the process
1128
1128
1129 * devel.discovery.grow-sample.dynamic=True
1129 * devel.discovery.grow-sample.dynamic=True
1130
1130
1131 When discovery.grow-sample.dynamic is True, the default, the sample size is
1131 When discovery.grow-sample.dynamic is True, the default, the sample size is
1132 adapted to the shape of the undecided set (it is set to the max of:
1132 adapted to the shape of the undecided set (it is set to the max of:
1133 <target-size>, len(roots(undecided)), len(heads(undecided)
1133 <target-size>, len(roots(undecided)), len(heads(undecided)
1134
1134
1135 * devel.discovery.grow-sample.rate=1.05
1135 * devel.discovery.grow-sample.rate=1.05
1136
1136
1137 the rate at which the sample grow
1137 the rate at which the sample grow
1138
1138
1139 * devel.discovery.randomize=True
1139 * devel.discovery.randomize=True
1140
1140
1141 If andom sampling during discovery are deterministic. It is meant for
1141 If andom sampling during discovery are deterministic. It is meant for
1142 integration tests.
1142 integration tests.
1143
1143
1144 * devel.discovery.sample-size=200
1144 * devel.discovery.sample-size=200
1145
1145
1146 Control the initial size of the discovery sample
1146 Control the initial size of the discovery sample
1147
1147
1148 * devel.discovery.sample-size.initial=100
1148 * devel.discovery.sample-size.initial=100
1149
1149
1150 Control the initial size of the discovery for initial change
1150 Control the initial size of the discovery for initial change
1151 """
1151 """
1152 unfi = repo.unfiltered()
1152 unfi = repo.unfiltered()
1153
1153
1154 # setup potential extra filtering
1154 # setup potential extra filtering
1155 local_revs = opts["local_as_revs"]
1155 local_revs = opts["local_as_revs"]
1156 remote_revs = opts["remote_as_revs"]
1156 remote_revs = opts["remote_as_revs"]
1157
1157
1158 # make sure tests are repeatable
1158 # make sure tests are repeatable
1159 random.seed(int(opts['seed']))
1159 random.seed(int(opts['seed']))
1160
1160
1161 if not remote_revs:
1161 if not remote_revs:
1162 path = urlutil.get_unique_pull_path_obj(
1162 path = urlutil.get_unique_pull_path_obj(
1163 b'debugdiscovery', ui, remoteurl
1163 b'debugdiscovery', ui, remoteurl
1164 )
1164 )
1165 branches = (path.branch, [])
1165 branches = (path.branch, [])
1166 remote = hg.peer(repo, pycompat.byteskwargs(opts), path)
1166 remote = hg.peer(repo, pycompat.byteskwargs(opts), path)
1167 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1167 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1168 else:
1168 else:
1169 branches = (None, [])
1169 branches = (None, [])
1170 remote_filtered_revs = logcmdutil.revrange(
1170 remote_filtered_revs = logcmdutil.revrange(
1171 unfi, [b"not (::(%s))" % remote_revs]
1171 unfi, [b"not (::(%s))" % remote_revs]
1172 )
1172 )
1173 remote_filtered_revs = frozenset(remote_filtered_revs)
1173 remote_filtered_revs = frozenset(remote_filtered_revs)
1174
1174
1175 def remote_func(x):
1175 def remote_func(x):
1176 return remote_filtered_revs
1176 return remote_filtered_revs
1177
1177
1178 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1178 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1179
1179
1180 remote = repo.peer()
1180 remote = repo.peer()
1181 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1181 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1182
1182
1183 if local_revs:
1183 if local_revs:
1184 local_filtered_revs = logcmdutil.revrange(
1184 local_filtered_revs = logcmdutil.revrange(
1185 unfi, [b"not (::(%s))" % local_revs]
1185 unfi, [b"not (::(%s))" % local_revs]
1186 )
1186 )
1187 local_filtered_revs = frozenset(local_filtered_revs)
1187 local_filtered_revs = frozenset(local_filtered_revs)
1188
1188
1189 def local_func(x):
1189 def local_func(x):
1190 return local_filtered_revs
1190 return local_filtered_revs
1191
1191
1192 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1192 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1193 repo = repo.filtered(b'debug-discovery-local-filter')
1193 repo = repo.filtered(b'debug-discovery-local-filter')
1194
1194
1195 data = {}
1195 data = {}
1196 if opts.get('old'):
1196 if opts.get('old'):
1197
1197
1198 def doit(pushedrevs, remoteheads, remote=remote):
1198 def doit(pushedrevs, remoteheads, remote=remote):
1199 if not hasattr(remote, 'branches'):
1199 if not hasattr(remote, 'branches'):
1200 # enable in-client legacy support
1200 # enable in-client legacy support
1201 remote = localrepo.locallegacypeer(remote.local())
1201 remote = localrepo.locallegacypeer(remote.local())
1202 if remote_revs:
1202 if remote_revs:
1203 r = remote._repo.filtered(b'debug-discovery-remote-filter')
1203 r = remote._repo.filtered(b'debug-discovery-remote-filter')
1204 remote._repo = r
1204 remote._repo = r
1205 common, _in, hds = treediscovery.findcommonincoming(
1205 common, _in, hds = treediscovery.findcommonincoming(
1206 repo, remote, force=True, audit=data
1206 repo, remote, force=True, audit=data
1207 )
1207 )
1208 common = set(common)
1208 common = set(common)
1209 if not opts.get('nonheads'):
1209 if not opts.get('nonheads'):
1210 ui.writenoi18n(
1210 ui.writenoi18n(
1211 b"unpruned common: %s\n"
1211 b"unpruned common: %s\n"
1212 % b" ".join(sorted(short(n) for n in common))
1212 % b" ".join(sorted(short(n) for n in common))
1213 )
1213 )
1214
1214
1215 clnode = repo.changelog.node
1215 clnode = repo.changelog.node
1216 common = repo.revs(b'heads(::%ln)', common)
1216 common = repo.revs(b'heads(::%ln)', common)
1217 common = {clnode(r) for r in common}
1217 common = {clnode(r) for r in common}
1218 return common, hds
1218 return common, hds
1219
1219
1220 else:
1220 else:
1221
1221
1222 def doit(pushedrevs, remoteheads, remote=remote):
1222 def doit(pushedrevs, remoteheads, remote=remote):
1223 nodes = None
1223 nodes = None
1224 if pushedrevs:
1224 if pushedrevs:
1225 revs = logcmdutil.revrange(repo, pushedrevs)
1225 revs = logcmdutil.revrange(repo, pushedrevs)
1226 nodes = [repo[r].node() for r in revs]
1226 nodes = [repo[r].node() for r in revs]
1227 common, any, hds = setdiscovery.findcommonheads(
1227 common, any, hds = setdiscovery.findcommonheads(
1228 ui,
1228 ui,
1229 repo,
1229 repo,
1230 remote,
1230 remote,
1231 ancestorsof=nodes,
1231 ancestorsof=nodes,
1232 audit=data,
1232 audit=data,
1233 abortwhenunrelated=False,
1233 abortwhenunrelated=False,
1234 )
1234 )
1235 return common, hds
1235 return common, hds
1236
1236
1237 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1237 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1238 localrevs = opts['rev']
1238 localrevs = opts['rev']
1239
1239
1240 fm = ui.formatter(b'debugdiscovery', pycompat.byteskwargs(opts))
1240 fm = ui.formatter(b'debugdiscovery', pycompat.byteskwargs(opts))
1241 if fm.strict_format:
1241 if fm.strict_format:
1242
1242
1243 @contextlib.contextmanager
1243 @contextlib.contextmanager
1244 def may_capture_output():
1244 def may_capture_output():
1245 ui.pushbuffer()
1245 ui.pushbuffer()
1246 yield
1246 yield
1247 data[b'output'] = ui.popbuffer()
1247 data[b'output'] = ui.popbuffer()
1248
1248
1249 else:
1249 else:
1250 may_capture_output = util.nullcontextmanager
1250 may_capture_output = util.nullcontextmanager
1251 with may_capture_output():
1251 with may_capture_output():
1252 with util.timedcm('debug-discovery') as t:
1252 with util.timedcm('debug-discovery') as t:
1253 common, hds = doit(localrevs, remoterevs)
1253 common, hds = doit(localrevs, remoterevs)
1254
1254
1255 # compute all statistics
1255 # compute all statistics
1256 if len(common) == 1 and repo.nullid in common:
1256 if len(common) == 1 and repo.nullid in common:
1257 common = set()
1257 common = set()
1258 heads_common = set(common)
1258 heads_common = set(common)
1259 heads_remote = set(hds)
1259 heads_remote = set(hds)
1260 heads_local = set(repo.heads())
1260 heads_local = set(repo.heads())
1261 # note: they cannot be a local or remote head that is in common and not
1261 # note: they cannot be a local or remote head that is in common and not
1262 # itself a head of common.
1262 # itself a head of common.
1263 heads_common_local = heads_common & heads_local
1263 heads_common_local = heads_common & heads_local
1264 heads_common_remote = heads_common & heads_remote
1264 heads_common_remote = heads_common & heads_remote
1265 heads_common_both = heads_common & heads_remote & heads_local
1265 heads_common_both = heads_common & heads_remote & heads_local
1266
1266
1267 all = repo.revs(b'all()')
1267 all = repo.revs(b'all()')
1268 common = repo.revs(b'::%ln', common)
1268 common = repo.revs(b'::%ln', common)
1269 roots_common = repo.revs(b'roots(::%ld)', common)
1269 roots_common = repo.revs(b'roots(::%ld)', common)
1270 missing = repo.revs(b'not ::%ld', common)
1270 missing = repo.revs(b'not ::%ld', common)
1271 heads_missing = repo.revs(b'heads(%ld)', missing)
1271 heads_missing = repo.revs(b'heads(%ld)', missing)
1272 roots_missing = repo.revs(b'roots(%ld)', missing)
1272 roots_missing = repo.revs(b'roots(%ld)', missing)
1273 assert len(common) + len(missing) == len(all)
1273 assert len(common) + len(missing) == len(all)
1274
1274
1275 initial_undecided = repo.revs(
1275 initial_undecided = repo.revs(
1276 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1276 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1277 )
1277 )
1278 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1278 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1279 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1279 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1280 common_initial_undecided = initial_undecided & common
1280 common_initial_undecided = initial_undecided & common
1281 missing_initial_undecided = initial_undecided & missing
1281 missing_initial_undecided = initial_undecided & missing
1282
1282
1283 data[b'elapsed'] = t.elapsed
1283 data[b'elapsed'] = t.elapsed
1284 data[b'nb-common-heads'] = len(heads_common)
1284 data[b'nb-common-heads'] = len(heads_common)
1285 data[b'nb-common-heads-local'] = len(heads_common_local)
1285 data[b'nb-common-heads-local'] = len(heads_common_local)
1286 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1286 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1287 data[b'nb-common-heads-both'] = len(heads_common_both)
1287 data[b'nb-common-heads-both'] = len(heads_common_both)
1288 data[b'nb-common-roots'] = len(roots_common)
1288 data[b'nb-common-roots'] = len(roots_common)
1289 data[b'nb-head-local'] = len(heads_local)
1289 data[b'nb-head-local'] = len(heads_local)
1290 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1290 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1291 data[b'nb-head-remote'] = len(heads_remote)
1291 data[b'nb-head-remote'] = len(heads_remote)
1292 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1292 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1293 heads_common_remote
1293 heads_common_remote
1294 )
1294 )
1295 data[b'nb-revs'] = len(all)
1295 data[b'nb-revs'] = len(all)
1296 data[b'nb-revs-common'] = len(common)
1296 data[b'nb-revs-common'] = len(common)
1297 data[b'nb-revs-missing'] = len(missing)
1297 data[b'nb-revs-missing'] = len(missing)
1298 data[b'nb-missing-heads'] = len(heads_missing)
1298 data[b'nb-missing-heads'] = len(heads_missing)
1299 data[b'nb-missing-roots'] = len(roots_missing)
1299 data[b'nb-missing-roots'] = len(roots_missing)
1300 data[b'nb-ini_und'] = len(initial_undecided)
1300 data[b'nb-ini_und'] = len(initial_undecided)
1301 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1301 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1302 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1302 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1303 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1303 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1304 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1304 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1305
1305
1306 fm.startitem()
1306 fm.startitem()
1307 fm.data(**pycompat.strkwargs(data))
1307 fm.data(**pycompat.strkwargs(data))
1308 # display discovery summary
1308 # display discovery summary
1309 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1309 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1310 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1310 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1311 if b'total-round-trips-heads' in data:
1311 if b'total-round-trips-heads' in data:
1312 fm.plain(
1312 fm.plain(
1313 b" round-trips-heads: %(total-round-trips-heads)9d\n" % data
1313 b" round-trips-heads: %(total-round-trips-heads)9d\n" % data
1314 )
1314 )
1315 if b'total-round-trips-branches' in data:
1315 if b'total-round-trips-branches' in data:
1316 fm.plain(
1316 fm.plain(
1317 b" round-trips-branches: %(total-round-trips-branches)9d\n"
1317 b" round-trips-branches: %(total-round-trips-branches)9d\n"
1318 % data
1318 % data
1319 )
1319 )
1320 if b'total-round-trips-between' in data:
1320 if b'total-round-trips-between' in data:
1321 fm.plain(
1321 fm.plain(
1322 b" round-trips-between: %(total-round-trips-between)9d\n" % data
1322 b" round-trips-between: %(total-round-trips-between)9d\n" % data
1323 )
1323 )
1324 fm.plain(b"queries: %(total-queries)9d\n" % data)
1324 fm.plain(b"queries: %(total-queries)9d\n" % data)
1325 if b'total-queries-branches' in data:
1325 if b'total-queries-branches' in data:
1326 fm.plain(b" queries-branches: %(total-queries-branches)9d\n" % data)
1326 fm.plain(b" queries-branches: %(total-queries-branches)9d\n" % data)
1327 if b'total-queries-between' in data:
1327 if b'total-queries-between' in data:
1328 fm.plain(b" queries-between: %(total-queries-between)9d\n" % data)
1328 fm.plain(b" queries-between: %(total-queries-between)9d\n" % data)
1329 fm.plain(b"heads summary:\n")
1329 fm.plain(b"heads summary:\n")
1330 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1330 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1331 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1331 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1332 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1332 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1333 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1333 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1334 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1334 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1335 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1335 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1336 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1336 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1337 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1337 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1338 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1338 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1339 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1339 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1340 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1340 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1341 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1341 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1342 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1342 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1343 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1343 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1344 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1344 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1345 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1345 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1346 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1346 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1347 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1347 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1348 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1348 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1349 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1349 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1350 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1350 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1351 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1351 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1352
1352
1353 if ui.verbose:
1353 if ui.verbose:
1354 fm.plain(
1354 fm.plain(
1355 b"common heads: %s\n"
1355 b"common heads: %s\n"
1356 % b" ".join(sorted(short(n) for n in heads_common))
1356 % b" ".join(sorted(short(n) for n in heads_common))
1357 )
1357 )
1358 fm.end()
1358 fm.end()
1359
1359
1360
1360
# Buffer size (4 KiB) used when streaming downloaded data to its destination.
_chunksize = 4 << 10
1362
1362
1363
1363
@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config

    The resource at ``url`` is fetched through Mercurial's URL handling
    (honoring auth/proxy configuration) and streamed in ``_chunksize``
    chunks to ``output`` if given, otherwise to the ui.
    """
    fh = urlmod.open(ui, url, output)
    try:
        dest = ui
        if output:
            dest = open(output, b"wb", _chunksize)
        try:
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            # only close what we opened ourselves; ui is not a file handle
            if output:
                dest.close()
    finally:
        # the source handle was previously leaked; always release it, even
        # when opening the output file or writing to it fails
        fh.close()
1386
1386
1387
1387
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', pycompat.byteskwargs(opts))
    for extname, extmod in sorted(
        extensions.extensions(ui), key=operator.itemgetter(0)
    ):
        isinternal = extensions.ismoduleinternal(extmod)

        # Figure out where the extension was loaded from, when knowable.
        extsource = None
        if hasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable

        if isinternal:
            # never expose magic string to users
            exttestedwith = []
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                fm.plain(b' (%s!)\n' % exttestedwith[-1])

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b' location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b' tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b' bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1448
1448
1449
1449
@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    # force import of fileset so we have predicates to optimize
    fileset.symbols

    ctx = logcmdutil.revsingle(repo, opts.get('rev'), None)

    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {stagename for stagename, _func in stages}

    # Decide which intermediate trees should be dumped.
    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts['show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for stagename in opts['show_stage']:
            if stagename not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % stagename)
        showalways.update(opts['show_stage'])

    # Run the expression through each processing stage in order, printing
    # the requested intermediate representations along the way.
    tree = filesetlang.parse(expr)
    for stagename, transform in stages:
        tree = transform(tree)
        if stagename in showalways:
            if opts['show_stage'] or stagename != b'parsed':
                ui.write(b"* %s:\n" % stagename)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    # Gather the candidate file names the matcher will be applied to.
    files = set()
    if opts['all_files']:
        for rev in repo:
            rctx = repo[rev]
            files.update(rctx.files())
            files.update(rctx.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if m(f):
            ui.write(b"%s\n" % f)
1545
1545
1546
1546
1547 @command(
1547 @command(
1548 b"debug-repair-issue6528",
1548 b"debug-repair-issue6528",
1549 [
1549 [
1550 (
1550 (
1551 b'',
1551 b'',
1552 b'to-report',
1552 b'to-report',
1553 b'',
1553 b'',
1554 _(b'build a report of affected revisions to this file'),
1554 _(b'build a report of affected revisions to this file'),
1555 _(b'FILE'),
1555 _(b'FILE'),
1556 ),
1556 ),
1557 (
1557 (
1558 b'',
1558 b'',
1559 b'from-report',
1559 b'from-report',
1560 b'',
1560 b'',
1561 _(b'repair revisions listed in this report file'),
1561 _(b'repair revisions listed in this report file'),
1562 _(b'FILE'),
1562 _(b'FILE'),
1563 ),
1563 ),
1564 (
1564 (
1565 b'',
1565 b'',
1566 b'paranoid',
1566 b'paranoid',
1567 False,
1567 False,
1568 _(b'check that both detection methods do the same thing'),
1568 _(b'check that both detection methods do the same thing'),
1569 ),
1569 ),
1570 ]
1570 ]
1571 + cmdutil.dryrunopts,
1571 + cmdutil.dryrunopts,
1572 )
1572 )
1573 def debug_repair_issue6528(ui, repo, **opts):
1573 def debug_repair_issue6528(ui, repo, **opts):
1574 """find affected revisions and repair them. See issue6528 for more details.
1574 """find affected revisions and repair them. See issue6528 for more details.
1575
1575
1576 The `--to-report` and `--from-report` flags allow you to cache and reuse the
1576 The `--to-report` and `--from-report` flags allow you to cache and reuse the
1577 computation of affected revisions for a given repository across clones.
1577 computation of affected revisions for a given repository across clones.
1578 The report format is line-based (with empty lines ignored):
1578 The report format is line-based (with empty lines ignored):
1579
1579
1580 ```
1580 ```
1581 <ascii-hex of the affected revision>,... <unencoded filelog index filename>
1581 <ascii-hex of the affected revision>,... <unencoded filelog index filename>
1582 ```
1582 ```
1583
1583
1584 There can be multiple broken revisions per filelog, they are separated by
1584 There can be multiple broken revisions per filelog, they are separated by
1585 a comma with no spaces. The only space is between the revision(s) and the
1585 a comma with no spaces. The only space is between the revision(s) and the
1586 filename.
1586 filename.
1587
1587
1588 Note that this does *not* mean that this repairs future affected revisions,
1588 Note that this does *not* mean that this repairs future affected revisions,
1589 that needs a separate fix at the exchange level that was introduced in
1589 that needs a separate fix at the exchange level that was introduced in
1590 Mercurial 5.9.1.
1590 Mercurial 5.9.1.
1591
1591
1592 There is a `--paranoid` flag to test that the fast implementation is correct
1592 There is a `--paranoid` flag to test that the fast implementation is correct
1593 by checking it against the slow implementation. Since this matter is quite
1593 by checking it against the slow implementation. Since this matter is quite
1594 urgent and testing every edge-case is probably quite costly, we use this
1594 urgent and testing every edge-case is probably quite costly, we use this
1595 method to test on large repositories as a fuzzing method of sorts.
1595 method to test on large repositories as a fuzzing method of sorts.
1596 """
1596 """
1597 cmdutil.check_incompatible_arguments(
1597 cmdutil.check_incompatible_arguments(
1598 opts, 'to_report', ['from_report', 'dry_run']
1598 opts, 'to_report', ['from_report', 'dry_run']
1599 )
1599 )
1600 dry_run = opts.get('dry_run')
1600 dry_run = opts.get('dry_run')
1601 to_report = opts.get('to_report')
1601 to_report = opts.get('to_report')
1602 from_report = opts.get('from_report')
1602 from_report = opts.get('from_report')
1603 paranoid = opts.get('paranoid')
1603 paranoid = opts.get('paranoid')
1604 # TODO maybe add filelog pattern and revision pattern parameters to help
1604 # TODO maybe add filelog pattern and revision pattern parameters to help
1605 # narrow down the search for users that know what they're looking for?
1605 # narrow down the search for users that know what they're looking for?
1606
1606
1607 if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
1607 if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
1608 msg = b"can only repair revlogv1 repositories, v2 is not affected"
1608 msg = b"can only repair revlogv1 repositories, v2 is not affected"
1609 raise error.Abort(_(msg))
1609 raise error.Abort(_(msg))
1610
1610
1611 rewrite.repair_issue6528(
1611 rewrite.repair_issue6528(
1612 ui,
1612 ui,
1613 repo,
1613 repo,
1614 dry_run=dry_run,
1614 dry_run=dry_run,
1615 to_report=to_report,
1615 to_report=to_report,
1616 from_report=from_report,
1616 from_report=from_report,
1617 paranoid=paranoid,
1617 paranoid=paranoid,
1618 )
1618 )
1619
1619
1620
1620
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    # Column width: the longest variant name, but never narrower than the
    # literal header text.
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        # Pad each variant name so the value columns line up.
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', pycompat.byteskwargs(opts))
    if fm.isplain():

        def formatvalue(value):
            # Bytes-like values are printed verbatim; everything else is
            # rendered as a yes/no flag for the plain formatter.
            if hasattr(value, 'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        # Structured formatters (json, template, ...) get the raw value.
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # Label choice drives coloring: mismatch against config is the most
        # interesting, then mismatch against Mercurial's default.
        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
1690
1690
1691
1691
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
    try:
        # Probe case sensitivity with a throwaway temp file in the target
        # path; an OSError (e.g. unwritable directory) leaves the answer
        # unknown rather than aborting the command.
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1714
1714
1715
1715
@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    # Map the user-facing compression name to the on-disk bundle header.
    bundletype = opts.get('type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1761
1761
1762
1762
@command(b'debugignore', [], b'[FILE]...')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    # The file itself matches an ignore pattern.
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # Otherwise check whether a containing directory is
                    # ignored, which transitively ignores the file.
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1811
1811
1812
1812
@command(
    b'debug-revlog-index|debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a revlog"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    fm = ui.formatter(b'debugindex', opts)

    # The store may be a wrapper (e.g. a filelog) around the actual revlog;
    # unwrap it when a `_revlog` attribute is present.
    revlog = getattr(store, '_revlog', store)

    return revlog_debug.debug_index(
        ui,
        repo,
        formatter=fm,
        revlog=revlog,
        full_node=ui.debugflag,
    )
1834
1834
1835
1835
@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    r = cmdutil.openstorage(
        repo, b'debugindexdot', file_, pycompat.byteskwargs(opts)
    )
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        # First parent always gets an edge; the second only when it is not
        # the null revision (i.e. the changeset is a merge).
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")
1855
1855
1856
1856
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # Force the index to be fully loaded/used before asking for stats.
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    # Only the native C index implementation exposes a stats() method;
    # pure-Python (and, presumably, Rust) indexes do not — abort cleanly
    # instead of crashing with AttributeError.
    if not hasattr(index, 'stats'):
        raise error.Abort(_(b'debugindexstats only works with native C code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))
1866
1866
1867
1867
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    problems = 0

    fm = ui.formatter(b'debuginstall', pycompat.byteskwargs(opts))
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if hasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        # PyOxidizer builds embed the stdlib in the executable itself.
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b' TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b' SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if hasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        # Import the compiled extension modules to verify they actually load;
        # any failure is reported as an install problem.
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util.has_re2():
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # 'vi' is the fallback default, so a missing 'vi' means "no editor set"
    # rather than "configured editor not found" — report them differently.
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    # Let loaded extensions run their own install checks.
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems
2164
2164
2165
2165
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    # One character per queried id: b"1" for known, b"0" for unknown.
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
2178
2178
2179
2179
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Pure alias: delegate to the modern name-completion command.
    debugnamecomplete(ui, repo, *args)
2184
2184
2185
2185
@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-free-*: unconditionally delete the lock file(s) and exit.
    # No staleness check is performed, hence the DANGEROUS label above.
    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        # --set-lock/--set-wlock: acquire without waiting (wlock(False))
        # so an already-held lock aborts immediately instead of blocking.
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            try:
                if ui.interactive():
                    # Interactive session: hold the lock(s) until the user
                    # confirms release.
                    prompt = _(b"ready to release the lock (y)? $$ &Yes")
                    ui.promptchoice(prompt)
                else:
                    msg = b"%d locks held, waiting for signal\n"
                    msg %= len(locks)
                    ui.status(msg)
                    while True:  # XXX wait for a signal
                        time.sleep(0.1)
            except KeyboardInterrupt:
                msg = b"signal-received releasing locks\n"
                ui.status(msg)
            return 0
    finally:
        # Always release whatever was acquired, even on abort above.
        release(*locks)

    # No modifying option was given: report the current lock state.
    now = time.time()
    held = 0

    def report(vfs, name, method):
        """Print the state of one lock file; return 1 if held, else 0."""
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We acquired it, so it was free; release immediately.
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                # Lock contents are "host:pid"; only mention the host when
                # the lock was taken on a different machine.
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except FileNotFoundError:
                # Lock file vanished between the failed acquire and the
                # lstat: treat it as free.
                pass

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
2308
2308
2309
2309
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        """Return the manifest fulltext cache, aborting if unsupported."""
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        # Clearing mutates on-disk state, so take the wlock.
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
        return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
        return

    # Neither --clear nor --add: display the cache contents.
    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
2383
2383
2384
2384
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    # Default template: a human-readable dump of commits, per-file merge
    # records and extras.  A user-supplied -T template overrides it.
    if not opts['template']:
        opts['template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', pycompat.byteskwargs(opts))
    fm.startitem()

    # The two commits being merged (local/other), with optional labels.
    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    # One item per file with a merge record; the fields emitted depend on
    # whether the record is a content merge or a path-conflict record.
    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    # Extras attached to files that have no merge record of their own.
    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed it's extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()
2491
2491
2492
2492
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    # Gather candidates from every namespace except branches; branches get
    # special handling below because only *open* ones should be offered
    # (historically this command listed open branches only).
    candidates = set()
    for ns_name, ns in repo.names.items():
        if ns_name == b'branches':
            continue
        candidates.update(ns.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # No argument means "complete everything" (empty prefix).
    prefixes = args or [b'']
    matches = {
        name
        for prefix in prefixes
        for name in candidates
        if name.startswith(prefix)
    }
    ui.write(b'\n'.join(sorted(matches)))
    ui.write(b'\n')
2515
2515
2516
2516
@command(
    b'debugnodemap',
    (
        cmdutil.debugrevlogopts
        + [
            (
                b'',
                b'dump-new',
                False,
                _(b'write a (new) persistent binary nodemap on stdout'),
            ),
            (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
            (
                b'',
                b'check',
                False,
                _(b'check that the data on disk data are correct.'),
            ),
            (
                b'',
                b'metadata',
                False,
                _(b'display the on disk meta data for the nodemap'),
            ),
        ]
    ),
    _(b'-c|-m|FILE'),
)
def debugnodemap(ui, repo, file_=None, **opts):
    """write and inspect on disk nodemap"""
    # A revlog can be selected either by -c/-m/--dir or by an explicit FILE
    # argument, but not both; default to the changelog when nothing is given.
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if file_ is not None:
            raise error.InputError(
                _(b'cannot specify a file with other arguments')
            )
    elif file_ is None:
        opts['changelog'] = True
    r = cmdutil.openstorage(
        repo.unfiltered(), b'debugnodemap', file_, pycompat.byteskwargs(opts)
    )
    # Unwrap manifest/filelog objects to get at the underlying revlog.
    if isinstance(r, (manifest.manifestrevlog, filelog.filelog)):
        r = r._revlog
    if opts['dump_new']:
        # Prefer the index's native serializer when available (Rust/C
        # index); otherwise build the data with the pure-Python fallback.
        if hasattr(r.index, "nodemap_data_all"):
            data = r.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(r.index)
        ui.write(data)
    elif opts['dump_disk']:
        nm_data = nodemap.persisted_data(r)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        nm_data = nodemap.persisted_data(r)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, r.index, data)
    elif opts['metadata']:
        nm_data = nodemap.persisted_data(r)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2586
2586
2587
2587
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    def parsenodeid(s):
        """Convert a full hex node id to binary, or raise InputError."""
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise ValueError
            return n
        except ValueError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    # --delete: remove markers by index and return without creating any.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record one marker precursor -> successors.
        if opts['rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # Parent data can only be recorded for changesets we
                    # actually have locally.
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot used --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts['flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally filtered by --rev.
        if opts['rev']:
            revs = logcmdutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts['exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
2735
2735
2736
2736
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    # Resolve the requested revision (working context when no -r is given)
    # and emit one "source -> destination" line per copy recorded vs p1.
    changectx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    copy_map = changectx.p1copies()
    for destination, source in copy_map.items():
        ui.write(b'%s -> %s\n' % (source, destination))
2748
2748
2749
2749
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    # Default to the working directory parent when no --rev is given.
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    copy_map = ctx.p2copies()
    for destination, source in copy_map.items():
        ui.write(b'%s -> %s\n' % (source, destination))
2761
2761
2762
2762
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        # Return (files, dirs) completions for `path`, keeping only
        # dirstate entries whose one-character state is in `acceptable`
        # (assumed: n=normal, m=merged, a=added, r=removed -- matches
        # the option handling below; confirm against dirstate docs).
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # A path outside the repository cannot be completed at all.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        # Make `spec` repo-relative; on platforms whose separator is not
        # '/', normalize it to the '/'-separated form the dirstate uses.
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        # Bound methods hoisted out of the per-file loop for speed.
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, stop at the next separator after the
                # spec and offer the intermediate directory instead.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Accumulate the acceptable dirstate states from the filter flags;
    # an empty string falls back to "everything" (b'nmar') below.
    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    # Directories and files are printed together, sorted, one per line.
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2831
2831
2832
2832
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    # Patterns are matched against the first revision.
    source_ctx = scmutil.revsingle(repo, rev1)
    target_ctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(source_ctx, pats, opts)
    copy_map = copies.pathcopies(source_ctx, target_ctx, matcher)
    for destination, source in sorted(copy_map.items()):
        ui.write(b'%s -> %s\n' % (source, destination))
2846
2846
2847
2847
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    # The override only needs to cover peer creation; the requests made
    # below go through the ui captured by the peer at creation time.
    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

    try:
        local = peer.local() is not None
        canpush = peer.canpush()

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
        ui.write(
            _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
        )
    finally:
        # Always release the peer connection, even if a query failed.
        peer.close()
2871
2871
2872
2872
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    # --tool is propagated through the same config override a real
    # merge would use (ui.forcemerge), so the selection logic below
    # behaves exactly as it would during an actual merge.
    overrides = {}
    if opts['tool']:
        overrides[(b'ui', b'forcemerge')] = opts['tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        # Surface (at --verbose) the inputs that can short-circuit
        # merge-pattern matching; see the docstring above.
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, pycompat.byteskwargs(opts))
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            # Silence _picktool's own chatter unless --debug is in
            # effect, keeping the normal output to one line per file.
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))
2956
2956
2957
2957
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if keyinfo:
            # Push mode: conditionally update KEY from OLD to NEW.
            key, old, new = keyinfo
            with target.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            # The pushkey result is truthy on success; invert it so the
            # command exits with 0 on success, per shell convention.
            return not r
        else:
            # List mode: dump every key/value pair in the namespace,
            # escaping non-printable bytes for safe terminal output.
            for k, v in sorted(target.listkeys(namespace).items()):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(k), stringutil.escapestr(v))
                )
    finally:
        # Always release the peer connection.
        target.close()
2993
2993
2994
2994
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    """display and compare the parent vectors ("pvecs") of two revisions

    Prints both vectors, their depths, the delta/hamming distance, and
    the estimated relation between the two revisions.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    # Classify the relation: equal, descendant, ancestor, or unrelated.
    # NOTE(review): there is no final `else`; if none of the four pvec
    # comparisons holds, `rel` would be unbound when formatted below --
    # presumably the operators are exhaustive, but confirm in pvec.py.
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
3021
3021
3022
3022
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        # Rebuilding inside an open transaction would be unsafe; this
        # indicates a caller bug rather than a user error.
        if repo.currenttransaction() is not None:
            msg = b'rebuild the dirstate outside of a transaction'
            raise error.ProgrammingError(msg)
        dirstate = repo.dirstate
        # None means "rebuild everything" for dirstate.rebuild().
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            # In the manifest but unknown to the dirstate: inconsistent.
            manifestonly = manifestfiles - dirstatefiles
            # In the dirstate but not the manifest: only inconsistent if
            # the entry is not an explicit "added" record.
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
            changedfiles = manifestonly | dsnotadded

        with dirstate.changing_parents(repo):
            dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
3074
3074
3075
3075
@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file

    With --only-data, only wrong ``.d`` files are looked for, which is
    much faster.
    """
    only_data = opts.get("only_data")
    repair.rebuildfncache(ui, repo, only_data)
3091
3091
3092
3092
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, pats, pycompat.byteskwargs(opts))
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        rename_info = fctx.filelog().renamed(fctx.filenode())
        display_path = repo.pathto(path)
        if not rename_info:
            ui.write(_(b"%s not renamed\n") % display_path)
        else:
            src_path, src_node = rename_info
            ui.write(
                _(b"%s renamed from %s:%s\n")
                % (display_path, src_path, hex(src_node))
            )
3111
3111
3112
3112
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    # One requirement per line, in sorted order for stable output.
    for requirement in sorted(repo.requirements):
        ui.write(b"%s\n" % requirement)
3118
3118
3119
3119
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog

    With --dump, raw index data is dumped instead of the statistics
    summary.
    """
    rlog = cmdutil.openrevlog(
        repo, b'debugrevlog', file_, pycompat.byteskwargs(opts)
    )

    dump_requested = opts.get("dump")
    if dump_requested:
        revlog_debug.dump(ui, rlog)
    else:
        revlog_debug.debug_revlog(ui, rlog)
    return 0
3137
3137
3138
3138
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    r = cmdutil.openrevlog(
        repo, b'debugrevlogindex', file_, pycompat.byteskwargs(opts)
    )
    # Only the two historical index display formats are supported.
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # --debug prints full 40-char hashes, otherwise short ones.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    # Print the column header matching the chosen format/verbosity;
    # node columns are padded to the hash width computed above.
    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    # Emit one row per revision, mirroring the header layout above.
    for i in r:
        node = r.node(i)
        if format == 0:
            # Format 0 shows parents as nodeids; fall back to null
            # parents if the parent lookup fails for any reason.
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            # Format 1 shows parents as revision numbers and includes
            # flags/size information.
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3253
3253
3254
3254
3255 @command(
3255 @command(
3256 b'debugrevspec',
3256 b'debugrevspec',
3257 [
3257 [
3258 (
3258 (
3259 b'',
3259 b'',
3260 b'optimize',
3260 b'optimize',
3261 None,
3261 None,
3262 _(b'print parsed tree after optimizing (DEPRECATED)'),
3262 _(b'print parsed tree after optimizing (DEPRECATED)'),
3263 ),
3263 ),
3264 (
3264 (
3265 b'',
3265 b'',
3266 b'show-revs',
3266 b'show-revs',
3267 True,
3267 True,
3268 _(b'print list of result revisions (default)'),
3268 _(b'print list of result revisions (default)'),
3269 ),
3269 ),
3270 (
3270 (
3271 b's',
3271 b's',
3272 b'show-set',
3272 b'show-set',
3273 None,
3273 None,
3274 _(b'print internal representation of result set'),
3274 _(b'print internal representation of result set'),
3275 ),
3275 ),
3276 (
3276 (
3277 b'p',
3277 b'p',
3278 b'show-stage',
3278 b'show-stage',
3279 [],
3279 [],
3280 _(b'print parsed tree at the given stage'),
3280 _(b'print parsed tree at the given stage'),
3281 _(b'NAME'),
3281 _(b'NAME'),
3282 ),
3282 ),
3283 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3283 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3284 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3284 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3285 ],
3285 ],
3286 b'REVSPEC',
3286 b'REVSPEC',
3287 )
3287 )
3288 def debugrevspec(ui, repo, expr, **opts):
3288 def debugrevspec(ui, repo, expr, **opts):
3289 """parse and apply a revision specification
3289 """parse and apply a revision specification
3290
3290
3291 Use -p/--show-stage option to print the parsed tree at the given stages.
3291 Use -p/--show-stage option to print the parsed tree at the given stages.
3292 Use -p all to print tree at every stage.
3292 Use -p all to print tree at every stage.
3293
3293
3294 Use --no-show-revs option with -s or -p to print only the set
3294 Use --no-show-revs option with -s or -p to print only the set
3295 representation or the parsed tree respectively.
3295 representation or the parsed tree respectively.
3296
3296
3297 Use --verify-optimized to compare the optimized result with the unoptimized
3297 Use --verify-optimized to compare the optimized result with the unoptimized
3298 one. Returns 1 if the optimized result differs.
3298 one. Returns 1 if the optimized result differs.
3299 """
3299 """
3300 aliases = ui.configitems(b'revsetalias')
3300 aliases = ui.configitems(b'revsetalias')
3301 stages = [
3301 stages = [
3302 (b'parsed', lambda tree: tree),
3302 (b'parsed', lambda tree: tree),
3303 (
3303 (
3304 b'expanded',
3304 b'expanded',
3305 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3305 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3306 ),
3306 ),
3307 (b'concatenated', revsetlang.foldconcat),
3307 (b'concatenated', revsetlang.foldconcat),
3308 (b'analyzed', revsetlang.analyze),
3308 (b'analyzed', revsetlang.analyze),
3309 (b'optimized', revsetlang.optimize),
3309 (b'optimized', revsetlang.optimize),
3310 ]
3310 ]
3311 if opts['no_optimized']:
3311 if opts['no_optimized']:
3312 stages = stages[:-1]
3312 stages = stages[:-1]
3313 if opts['verify_optimized'] and opts['no_optimized']:
3313 if opts['verify_optimized'] and opts['no_optimized']:
3314 raise error.Abort(
3314 raise error.Abort(
3315 _(b'cannot use --verify-optimized with --no-optimized')
3315 _(b'cannot use --verify-optimized with --no-optimized')
3316 )
3316 )
3317 stagenames = {n for n, f in stages}
3317 stagenames = {n for n, f in stages}
3318
3318
3319 showalways = set()
3319 showalways = set()
3320 showchanged = set()
3320 showchanged = set()
3321 if ui.verbose and not opts['show_stage']:
3321 if ui.verbose and not opts['show_stage']:
3322 # show parsed tree by --verbose (deprecated)
3322 # show parsed tree by --verbose (deprecated)
3323 showalways.add(b'parsed')
3323 showalways.add(b'parsed')
3324 showchanged.update([b'expanded', b'concatenated'])
3324 showchanged.update([b'expanded', b'concatenated'])
3325 if opts['optimize']:
3325 if opts['optimize']:
3326 showalways.add(b'optimized')
3326 showalways.add(b'optimized')
3327 if opts['show_stage'] and opts['optimize']:
3327 if opts['show_stage'] and opts['optimize']:
3328 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3328 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3329 if opts['show_stage'] == [b'all']:
3329 if opts['show_stage'] == [b'all']:
3330 showalways.update(stagenames)
3330 showalways.update(stagenames)
3331 else:
3331 else:
3332 for n in opts['show_stage']:
3332 for n in opts['show_stage']:
3333 if n not in stagenames:
3333 if n not in stagenames:
3334 raise error.Abort(_(b'invalid stage name: %s') % n)
3334 raise error.Abort(_(b'invalid stage name: %s') % n)
3335 showalways.update(opts['show_stage'])
3335 showalways.update(opts['show_stage'])
3336
3336
3337 treebystage = {}
3337 treebystage = {}
3338 printedtree = None
3338 printedtree = None
3339 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3339 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3340 for n, f in stages:
3340 for n, f in stages:
3341 treebystage[n] = tree = f(tree)
3341 treebystage[n] = tree = f(tree)
3342 if n in showalways or (n in showchanged and tree != printedtree):
3342 if n in showalways or (n in showchanged and tree != printedtree):
3343 if opts['show_stage'] or n != b'parsed':
3343 if opts['show_stage'] or n != b'parsed':
3344 ui.write(b"* %s:\n" % n)
3344 ui.write(b"* %s:\n" % n)
3345 ui.write(revsetlang.prettyformat(tree), b"\n")
3345 ui.write(revsetlang.prettyformat(tree), b"\n")
3346 printedtree = tree
3346 printedtree = tree
3347
3347
3348 if opts['verify_optimized']:
3348 if opts['verify_optimized']:
3349 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3349 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3350 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3350 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3351 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3351 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3352 ui.writenoi18n(
3352 ui.writenoi18n(
3353 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3353 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3354 )
3354 )
3355 ui.writenoi18n(
3355 ui.writenoi18n(
3356 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3356 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3357 )
3357 )
3358 arevs = list(arevs)
3358 arevs = list(arevs)
3359 brevs = list(brevs)
3359 brevs = list(brevs)
3360 if arevs == brevs:
3360 if arevs == brevs:
3361 return 0
3361 return 0
3362 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3362 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3363 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3363 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3364 sm = difflib.SequenceMatcher(None, arevs, brevs)
3364 sm = difflib.SequenceMatcher(None, arevs, brevs)
3365 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3365 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3366 if tag in ('delete', 'replace'):
3366 if tag in ('delete', 'replace'):
3367 for c in arevs[alo:ahi]:
3367 for c in arevs[alo:ahi]:
3368 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3368 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3369 if tag in ('insert', 'replace'):
3369 if tag in ('insert', 'replace'):
3370 for c in brevs[blo:bhi]:
3370 for c in brevs[blo:bhi]:
3371 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3371 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3372 if tag == 'equal':
3372 if tag == 'equal':
3373 for c in arevs[alo:ahi]:
3373 for c in arevs[alo:ahi]:
3374 ui.write(b' %d\n' % c)
3374 ui.write(b' %d\n' % c)
3375 return 1
3375 return 1
3376
3376
3377 func = revset.makematcher(tree)
3377 func = revset.makematcher(tree)
3378 revs = func(repo)
3378 revs = func(repo)
3379 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3379 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
3380 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3380 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3381 if not opts['show_revs']:
3381 if not opts['show_revs']:
3382 return
3382 return
3383 for c in revs:
3383 for c in revs:
3384 ui.write(b"%d\n" % c)
3384 ui.write(b"%d\n" % c)
3385
3385
3386
3386
3387 @command(
3387 @command(
3388 b'debugserve',
3388 b'debugserve',
3389 [
3389 [
3390 (
3390 (
3391 b'',
3391 b'',
3392 b'sshstdio',
3392 b'sshstdio',
3393 False,
3393 False,
3394 _(b'run an SSH server bound to process handles'),
3394 _(b'run an SSH server bound to process handles'),
3395 ),
3395 ),
3396 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3396 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3397 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3397 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3398 ],
3398 ],
3399 b'',
3399 b'',
3400 )
3400 )
3401 def debugserve(ui, repo, **opts):
3401 def debugserve(ui, repo, **opts):
3402 """run a server with advanced settings
3402 """run a server with advanced settings
3403
3403
3404 This command is similar to :hg:`serve`. It exists partially as a
3404 This command is similar to :hg:`serve`. It exists partially as a
3405 workaround to the fact that ``hg serve --stdio`` must have specific
3405 workaround to the fact that ``hg serve --stdio`` must have specific
3406 arguments for security reasons.
3406 arguments for security reasons.
3407 """
3407 """
3408 if not opts['sshstdio']:
3408 if not opts['sshstdio']:
3409 raise error.Abort(_(b'only --sshstdio is currently supported'))
3409 raise error.Abort(_(b'only --sshstdio is currently supported'))
3410
3410
3411 logfh = None
3411 logfh = None
3412
3412
3413 if opts['logiofd'] and opts['logiofile']:
3413 if opts['logiofd'] and opts['logiofile']:
3414 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3414 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3415
3415
3416 if opts['logiofd']:
3416 if opts['logiofd']:
3417 # Ideally we would be line buffered. But line buffering in binary
3417 # Ideally we would be line buffered. But line buffering in binary
3418 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3418 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3419 # buffering could have performance impacts. But since this isn't
3419 # buffering could have performance impacts. But since this isn't
3420 # performance critical code, it should be fine.
3420 # performance critical code, it should be fine.
3421 try:
3421 try:
3422 logfh = os.fdopen(int(opts['logiofd']), 'ab', 0)
3422 logfh = os.fdopen(int(opts['logiofd']), 'ab', 0)
3423 except OSError as e:
3423 except OSError as e:
3424 if e.errno != errno.ESPIPE:
3424 if e.errno != errno.ESPIPE:
3425 raise
3425 raise
3426 # can't seek a pipe, so `ab` mode fails on py3
3426 # can't seek a pipe, so `ab` mode fails on py3
3427 logfh = os.fdopen(int(opts['logiofd']), 'wb', 0)
3427 logfh = os.fdopen(int(opts['logiofd']), 'wb', 0)
3428 elif opts['logiofile']:
3428 elif opts['logiofile']:
3429 logfh = open(opts['logiofile'], b'ab', 0)
3429 logfh = open(opts['logiofile'], b'ab', 0)
3430
3430
3431 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3431 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3432 s.serve_forever()
3432 s.serve_forever()
3433
3433
3434
3434
3435 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3435 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3436 def debugsetparents(ui, repo, rev1, rev2=None):
3436 def debugsetparents(ui, repo, rev1, rev2=None):
3437 """manually set the parents of the current working directory (DANGEROUS)
3437 """manually set the parents of the current working directory (DANGEROUS)
3438
3438
3439 This command is not what you are looking for and should not be used. Using
3439 This command is not what you are looking for and should not be used. Using
3440 this command will most certainly results in slight corruption of the file
3440 this command will most certainly results in slight corruption of the file
3441 level histories withing your repository. DO NOT USE THIS COMMAND.
3441 level histories withing your repository. DO NOT USE THIS COMMAND.
3442
3442
3443 The command update the p1 and p2 field in the dirstate, and not touching
3443 The command update the p1 and p2 field in the dirstate, and not touching
3444 anything else. This useful for writing repository conversion tools, but
3444 anything else. This useful for writing repository conversion tools, but
3445 should be used with extreme care. For example, neither the working
3445 should be used with extreme care. For example, neither the working
3446 directory nor the dirstate is updated, so file status may be incorrect
3446 directory nor the dirstate is updated, so file status may be incorrect
3447 after running this command. Only used if you are one of the few people that
3447 after running this command. Only used if you are one of the few people that
3448 deeply unstand both conversion tools and file level histories. If you are
3448 deeply unstand both conversion tools and file level histories. If you are
3449 reading this help, you are not one of this people (most of them sailed west
3449 reading this help, you are not one of this people (most of them sailed west
3450 from Mithlond anyway.
3450 from Mithlond anyway.
3451
3451
3452 So one last time DO NOT USE THIS COMMAND.
3452 So one last time DO NOT USE THIS COMMAND.
3453
3453
3454 Returns 0 on success.
3454 Returns 0 on success.
3455 """
3455 """
3456
3456
3457 node1 = scmutil.revsingle(repo, rev1).node()
3457 node1 = scmutil.revsingle(repo, rev1).node()
3458 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3458 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3459
3459
3460 with repo.wlock():
3460 with repo.wlock():
3461 repo.setparents(node1, node2)
3461 repo.setparents(node1, node2)
3462
3462
3463
3463
3464 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3464 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3465 def debugsidedata(ui, repo, file_, rev=None, **opts):
3465 def debugsidedata(ui, repo, file_, rev=None, **opts):
3466 """dump the side data for a cl/manifest/file revision
3466 """dump the side data for a cl/manifest/file revision
3467
3467
3468 Use --verbose to dump the sidedata content."""
3468 Use --verbose to dump the sidedata content."""
3469 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
3469 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
3470 if rev is not None:
3470 if rev is not None:
3471 raise error.InputError(
3471 raise error.InputError(
3472 _(b'cannot specify a revision with other arguments')
3472 _(b'cannot specify a revision with other arguments')
3473 )
3473 )
3474 file_, rev = None, file_
3474 file_, rev = None, file_
3475 elif rev is None:
3475 elif rev is None:
3476 raise error.InputError(_(b'please specify a revision'))
3476 raise error.InputError(_(b'please specify a revision'))
3477 r = cmdutil.openstorage(
3477 r = cmdutil.openstorage(
3478 repo, b'debugdata', file_, pycompat.byteskwargs(opts)
3478 repo, b'debugdata', file_, pycompat.byteskwargs(opts)
3479 )
3479 )
3480 r = getattr(r, '_revlog', r)
3480 r = getattr(r, '_revlog', r)
3481 try:
3481 try:
3482 sidedata = r.sidedata(r.lookup(rev))
3482 sidedata = r.sidedata(r.lookup(rev))
3483 except KeyError:
3483 except KeyError:
3484 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3484 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3485 if sidedata:
3485 if sidedata:
3486 sidedata = list(sidedata.items())
3486 sidedata = list(sidedata.items())
3487 sidedata.sort()
3487 sidedata.sort()
3488 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3488 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3489 for key, value in sidedata:
3489 for key, value in sidedata:
3490 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3490 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3491 if ui.verbose:
3491 if ui.verbose:
3492 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3492 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3493
3493
3494
3494
3495 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3495 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3496 def debugssl(ui, repo, source=None, **opts):
3496 def debugssl(ui, repo, source=None, **opts):
3497 """test a secure connection to a server
3497 """test a secure connection to a server
3498
3498
3499 This builds the certificate chain for the server on Windows, installing the
3499 This builds the certificate chain for the server on Windows, installing the
3500 missing intermediates and trusted root via Windows Update if necessary. It
3500 missing intermediates and trusted root via Windows Update if necessary. It
3501 does nothing on other platforms.
3501 does nothing on other platforms.
3502
3502
3503 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3503 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3504 that server is used. See :hg:`help urls` for more information.
3504 that server is used. See :hg:`help urls` for more information.
3505
3505
3506 If the update succeeds, retry the original operation. Otherwise, the cause
3506 If the update succeeds, retry the original operation. Otherwise, the cause
3507 of the SSL error is likely another issue.
3507 of the SSL error is likely another issue.
3508 """
3508 """
3509 if not pycompat.iswindows:
3509 if not pycompat.iswindows:
3510 raise error.Abort(
3510 raise error.Abort(
3511 _(b'certificate chain building is only possible on Windows')
3511 _(b'certificate chain building is only possible on Windows')
3512 )
3512 )
3513
3513
3514 if not source:
3514 if not source:
3515 if not repo:
3515 if not repo:
3516 raise error.Abort(
3516 raise error.Abort(
3517 _(
3517 _(
3518 b"there is no Mercurial repository here, and no "
3518 b"there is no Mercurial repository here, and no "
3519 b"server specified"
3519 b"server specified"
3520 )
3520 )
3521 )
3521 )
3522 source = b"default"
3522 source = b"default"
3523
3523
3524 path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
3524 path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
3525 url = path.url
3525 url = path.url
3526
3526
3527 defaultport = {b'https': 443, b'ssh': 22}
3527 defaultport = {b'https': 443, b'ssh': 22}
3528 if url.scheme in defaultport:
3528 if url.scheme in defaultport:
3529 try:
3529 try:
3530 addr = (url.host, int(url.port or defaultport[url.scheme]))
3530 addr = (url.host, int(url.port or defaultport[url.scheme]))
3531 except ValueError:
3531 except ValueError:
3532 raise error.Abort(_(b"malformed port number in URL"))
3532 raise error.Abort(_(b"malformed port number in URL"))
3533 else:
3533 else:
3534 raise error.Abort(_(b"only https and ssh connections are supported"))
3534 raise error.Abort(_(b"only https and ssh connections are supported"))
3535
3535
3536 from . import win32
3536 from . import win32
3537
3537
3538 s = ssl.wrap_socket(
3538 s = ssl.wrap_socket(
3539 socket.socket(),
3539 socket.socket(),
3540 ssl_version=ssl.PROTOCOL_TLS,
3540 ssl_version=ssl.PROTOCOL_TLS,
3541 cert_reqs=ssl.CERT_NONE,
3541 cert_reqs=ssl.CERT_NONE,
3542 ca_certs=None,
3542 ca_certs=None,
3543 )
3543 )
3544
3544
3545 try:
3545 try:
3546 s.connect(addr)
3546 s.connect(addr)
3547 cert = s.getpeercert(True)
3547 cert = s.getpeercert(True)
3548
3548
3549 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3549 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3550
3550
3551 complete = win32.checkcertificatechain(cert, build=False)
3551 complete = win32.checkcertificatechain(cert, build=False)
3552
3552
3553 if not complete:
3553 if not complete:
3554 ui.status(_(b'certificate chain is incomplete, updating... '))
3554 ui.status(_(b'certificate chain is incomplete, updating... '))
3555
3555
3556 if not win32.checkcertificatechain(cert):
3556 if not win32.checkcertificatechain(cert):
3557 ui.status(_(b'failed.\n'))
3557 ui.status(_(b'failed.\n'))
3558 else:
3558 else:
3559 ui.status(_(b'done.\n'))
3559 ui.status(_(b'done.\n'))
3560 else:
3560 else:
3561 ui.status(_(b'full certificate chain is available\n'))
3561 ui.status(_(b'full certificate chain is available\n'))
3562 finally:
3562 finally:
3563 s.close()
3563 s.close()
3564
3564
3565
3565
3566 @command(
3566 @command(
3567 b'debug::stable-tail-sort',
3567 b'debug::stable-tail-sort',
3568 [
3568 [
3569 (
3569 (
3570 b'T',
3570 b'T',
3571 b'template',
3571 b'template',
3572 b'{rev}\n',
3572 b'{rev}\n',
3573 _(b'display with template'),
3573 _(b'display with template'),
3574 _(b'TEMPLATE'),
3574 _(b'TEMPLATE'),
3575 ),
3575 ),
3576 ],
3576 ],
3577 b'REV',
3577 b'REV',
3578 )
3578 )
3579 def debug_stable_tail_sort(ui, repo, revspec, template, **opts):
3579 def debug_stable_tail_sort(ui, repo, revspec, template, **opts):
3580 """display the stable-tail sort of the ancestors of a given node"""
3580 """display the stable-tail sort of the ancestors of a given node"""
3581 rev = logcmdutil.revsingle(repo, revspec).rev()
3581 rev = logcmdutil.revsingle(repo, revspec).rev()
3582 cl = repo.changelog
3582 cl = repo.changelog
3583
3583
3584 displayer = logcmdutil.maketemplater(ui, repo, template)
3584 displayer = logcmdutil.maketemplater(ui, repo, template)
3585 sorted_revs = stabletailsort._stable_tail_sort_naive(cl, rev)
3585 sorted_revs = stabletailsort._stable_tail_sort_naive(cl, rev)
3586 for ancestor_rev in sorted_revs:
3586 for ancestor_rev in sorted_revs:
3587 displayer.show(repo[ancestor_rev])
3587 displayer.show(repo[ancestor_rev])
3588
3588
3589
3589
3590 @command(
3590 @command(
3591 b'debug::stable-tail-sort-leaps',
3591 b'debug::stable-tail-sort-leaps',
3592 [
3592 [
3593 (
3593 (
3594 b'T',
3594 b'T',
3595 b'template',
3595 b'template',
3596 b'{rev}',
3596 b'{rev}',
3597 _(b'display with template'),
3597 _(b'display with template'),
3598 _(b'TEMPLATE'),
3598 _(b'TEMPLATE'),
3599 ),
3599 ),
3600 (b's', b'specific', False, _(b'restrict to specific leaps')),
3600 (b's', b'specific', False, _(b'restrict to specific leaps')),
3601 ],
3601 ],
3602 b'REV',
3602 b'REV',
3603 )
3603 )
3604 def debug_stable_tail_sort_leaps(ui, repo, rspec, template, specific, **opts):
3604 def debug_stable_tail_sort_leaps(ui, repo, rspec, template, specific, **opts):
3605 """display the leaps in the stable-tail sort of a node, one per line"""
3605 """display the leaps in the stable-tail sort of a node, one per line"""
3606 rev = logcmdutil.revsingle(repo, rspec).rev()
3606 rev = logcmdutil.revsingle(repo, rspec).rev()
3607
3607
3608 if specific:
3608 if specific:
3609 get_leaps = stabletailsort._find_specific_leaps_naive
3609 get_leaps = stabletailsort._find_specific_leaps_naive
3610 else:
3610 else:
3611 get_leaps = stabletailsort._find_all_leaps_naive
3611 get_leaps = stabletailsort._find_all_leaps_naive
3612
3612
3613 displayer = logcmdutil.maketemplater(ui, repo, template)
3613 displayer = logcmdutil.maketemplater(ui, repo, template)
3614 for source, target in get_leaps(repo.changelog, rev):
3614 for source, target in get_leaps(repo.changelog, rev):
3615 displayer.show(repo[source])
3615 displayer.show(repo[source])
3616 displayer.show(repo[target])
3616 displayer.show(repo[target])
3617 ui.write(b'\n')
3617 ui.write(b'\n')
3618
3618
3619
3619
3620 @command(
3620 @command(
3621 b"debugbackupbundle",
3621 b"debugbackupbundle",
3622 [
3622 [
3623 (
3623 (
3624 b"",
3624 b"",
3625 b"recover",
3625 b"recover",
3626 b"",
3626 b"",
3627 b"brings the specified changeset back into the repository",
3627 b"brings the specified changeset back into the repository",
3628 )
3628 )
3629 ]
3629 ]
3630 + cmdutil.logopts,
3630 + cmdutil.logopts,
3631 _(b"hg debugbackupbundle [--recover HASH]"),
3631 _(b"hg debugbackupbundle [--recover HASH]"),
3632 )
3632 )
3633 def debugbackupbundle(ui, repo, *pats, **opts):
3633 def debugbackupbundle(ui, repo, *pats, **opts):
3634 """lists the changesets available in backup bundles
3634 """lists the changesets available in backup bundles
3635
3635
3636 Without any arguments, this command prints a list of the changesets in each
3636 Without any arguments, this command prints a list of the changesets in each
3637 backup bundle.
3637 backup bundle.
3638
3638
3639 --recover takes a changeset hash and unbundles the first bundle that
3639 --recover takes a changeset hash and unbundles the first bundle that
3640 contains that hash, which puts that changeset back in your repository.
3640 contains that hash, which puts that changeset back in your repository.
3641
3641
3642 --verbose will print the entire commit message and the bundle path for that
3642 --verbose will print the entire commit message and the bundle path for that
3643 backup.
3643 backup.
3644 """
3644 """
3645 backups = list(
3645 backups = list(
3646 filter(
3646 filter(
3647 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3647 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3648 )
3648 )
3649 )
3649 )
3650 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3650 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3651
3651
3652 opts["bundle"] = b""
3652 opts["bundle"] = b""
3653 opts["force"] = None
3653 opts["force"] = None
3654 limit = logcmdutil.getlimit(pycompat.byteskwargs(opts))
3654 limit = logcmdutil.getlimit(pycompat.byteskwargs(opts))
3655
3655
3656 def display(other, chlist, displayer):
3656 def display(other, chlist, displayer):
3657 if opts.get("newest_first"):
3657 if opts.get("newest_first"):
3658 chlist.reverse()
3658 chlist.reverse()
3659 count = 0
3659 count = 0
3660 for n in chlist:
3660 for n in chlist:
3661 if limit is not None and count >= limit:
3661 if limit is not None and count >= limit:
3662 break
3662 break
3663 parents = [
3663 parents = [
3664 True for p in other.changelog.parents(n) if p != repo.nullid
3664 True for p in other.changelog.parents(n) if p != repo.nullid
3665 ]
3665 ]
3666 if opts.get("no_merges") and len(parents) == 2:
3666 if opts.get("no_merges") and len(parents) == 2:
3667 continue
3667 continue
3668 count += 1
3668 count += 1
3669 displayer.show(other[n])
3669 displayer.show(other[n])
3670
3670
3671 recovernode = opts.get("recover")
3671 recovernode = opts.get("recover")
3672 if recovernode:
3672 if recovernode:
3673 if scmutil.isrevsymbol(repo, recovernode):
3673 if scmutil.isrevsymbol(repo, recovernode):
3674 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3674 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3675 return
3675 return
3676 elif backups:
3676 elif backups:
3677 msg = _(
3677 msg = _(
3678 b"Recover changesets using: hg debugbackupbundle --recover "
3678 b"Recover changesets using: hg debugbackupbundle --recover "
3679 b"<changeset hash>\n\nAvailable backup changesets:"
3679 b"<changeset hash>\n\nAvailable backup changesets:"
3680 )
3680 )
3681 ui.status(msg, label=b"status.removed")
3681 ui.status(msg, label=b"status.removed")
3682 else:
3682 else:
3683 ui.status(_(b"no backup changesets found\n"))
3683 ui.status(_(b"no backup changesets found\n"))
3684 return
3684 return
3685
3685
3686 for backup in backups:
3686 for backup in backups:
3687 # Much of this is copied from the hg incoming logic
3687 # Much of this is copied from the hg incoming logic
3688 source = os.path.relpath(backup, encoding.getcwd())
3688 source = os.path.relpath(backup, encoding.getcwd())
3689 path = urlutil.get_unique_pull_path_obj(
3689 path = urlutil.get_unique_pull_path_obj(
3690 b'debugbackupbundle',
3690 b'debugbackupbundle',
3691 ui,
3691 ui,
3692 source,
3692 source,
3693 )
3693 )
3694 try:
3694 try:
3695 other = hg.peer(repo, pycompat.byteskwargs(opts), path)
3695 other = hg.peer(repo, pycompat.byteskwargs(opts), path)
3696 except error.LookupError as ex:
3696 except error.LookupError as ex:
3697 msg = _(b"\nwarning: unable to open bundle %s") % path.loc
3697 msg = _(b"\nwarning: unable to open bundle %s") % path.loc
3698 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3698 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3699 ui.warn(msg, hint=hint)
3699 ui.warn(msg, hint=hint)
3700 continue
3700 continue
3701 branches = (path.branch, opts.get('branch', []))
3701 branches = (path.branch, opts.get('branch', []))
3702 revs, checkout = hg.addbranchrevs(
3702 revs, checkout = hg.addbranchrevs(
3703 repo, other, branches, opts.get("rev")
3703 repo, other, branches, opts.get("rev")
3704 )
3704 )
3705
3705
3706 if revs:
3706 if revs:
3707 revs = [other.lookup(rev) for rev in revs]
3707 revs = [other.lookup(rev) for rev in revs]
3708
3708
3709 with ui.silent():
3709 with ui.silent():
3710 try:
3710 try:
3711 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3711 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3712 ui, repo, other, revs, opts["bundle"], opts["force"]
3712 ui, repo, other, revs, opts["bundle"], opts["force"]
3713 )
3713 )
3714 except error.LookupError:
3714 except error.LookupError:
3715 continue
3715 continue
3716
3716
3717 try:
3717 try:
3718 if not chlist:
3718 if not chlist:
3719 continue
3719 continue
3720 if recovernode:
3720 if recovernode:
3721 with repo.lock(), repo.transaction(b"unbundle") as tr:
3721 with repo.lock(), repo.transaction(b"unbundle") as tr:
3722 if scmutil.isrevsymbol(other, recovernode):
3722 if scmutil.isrevsymbol(other, recovernode):
3723 ui.status(_(b"Unbundling %s\n") % (recovernode))
3723 ui.status(_(b"Unbundling %s\n") % (recovernode))
3724 f = hg.openpath(ui, path.loc)
3724 f = hg.openpath(ui, path.loc)
3725 gen = exchange.readbundle(ui, f, path.loc)
3725 gen = exchange.readbundle(ui, f, path.loc)
3726 if isinstance(gen, bundle2.unbundle20):
3726 if isinstance(gen, bundle2.unbundle20):
3727 bundle2.applybundle(
3727 bundle2.applybundle(
3728 repo,
3728 repo,
3729 gen,
3729 gen,
3730 tr,
3730 tr,
3731 source=b"unbundle",
3731 source=b"unbundle",
3732 url=b"bundle:" + path.loc,
3732 url=b"bundle:" + path.loc,
3733 )
3733 )
3734 else:
3734 else:
3735 gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
3735 gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
3736 break
3736 break
3737 else:
3737 else:
3738 backupdate = encoding.strtolocal(
3738 backupdate = encoding.strtolocal(
3739 time.strftime(
3739 time.strftime(
3740 "%a %H:%M, %Y-%m-%d",
3740 "%a %H:%M, %Y-%m-%d",
3741 time.localtime(os.path.getmtime(path.loc)),
3741 time.localtime(os.path.getmtime(path.loc)),
3742 )
3742 )
3743 )
3743 )
3744 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3744 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3745 if ui.verbose:
3745 if ui.verbose:
3746 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
3746 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
3747 else:
3747 else:
3748 opts[
3748 opts[
3749 "template"
3749 "template"
3750 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3750 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3751 displayer = logcmdutil.changesetdisplayer(
3751 displayer = logcmdutil.changesetdisplayer(
3752 ui, other, pycompat.byteskwargs(opts), False
3752 ui, other, pycompat.byteskwargs(opts), False
3753 )
3753 )
3754 display(other, chlist, displayer)
3754 display(other, chlist, displayer)
3755 displayer.close()
3755 displayer.close()
3756 finally:
3756 finally:
3757 cleanupfn()
3757 cleanupfn()
3758
3758
3759
3759
3760 @command(
3760 @command(
3761 b'debugsub',
3761 b'debugsub',
3762 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3762 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3763 _(b'[-r REV] [REV]'),
3763 _(b'[-r REV] [REV]'),
3764 )
3764 )
3765 def debugsub(ui, repo, rev=None):
3765 def debugsub(ui, repo, rev=None):
3766 ctx = scmutil.revsingle(repo, rev, None)
3766 ctx = scmutil.revsingle(repo, rev, None)
3767 for k, v in sorted(ctx.substate.items()):
3767 for k, v in sorted(ctx.substate.items()):
3768 ui.writenoi18n(b'path %s\n' % k)
3768 ui.writenoi18n(b'path %s\n' % k)
3769 ui.writenoi18n(b' source %s\n' % v[0])
3769 ui.writenoi18n(b' source %s\n' % v[0])
3770 ui.writenoi18n(b' revision %s\n' % v[1])
3770 ui.writenoi18n(b' revision %s\n' % v[1])
3771
3771
3772
3772
3773 @command(
3773 @command(
3774 b'debugshell',
3774 b'debugshell',
3775 [
3775 [
3776 (
3776 (
3777 b'c',
3777 b'c',
3778 b'command',
3778 b'command',
3779 b'',
3779 b'',
3780 _(b'program passed in as a string'),
3780 _(b'program passed in as a string'),
3781 _(b'COMMAND'),
3781 _(b'COMMAND'),
3782 )
3782 )
3783 ],
3783 ],
3784 _(b'[-c COMMAND]'),
3784 _(b'[-c COMMAND]'),
3785 optionalrepo=True,
3785 optionalrepo=True,
3786 )
3786 )
3787 def debugshell(ui, repo, **opts):
def debugshell(ui, repo, **opts):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    # Names exposed in the interpreter's local namespace.
    shell_locals = {
        'ui': ui,
        'repo': repo,
    }

    # py2exe disables initialization of the site module, which is responsible
    # for arranging for ``quit()`` to exit the interpreter. Manually initialize
    # the stuff that site normally does here, so that the interpreter can be
    # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c,
    # py.exe, or py2exe.
    if getattr(sys, "frozen", None) == 'console_exe':
        try:
            import site

            site.setcopyright()
            site.sethelper()
            site.setquit()
        except ImportError:
            site = None  # Keep PyCharm happy

    cmd = opts.get('command')
    if not cmd:
        # No one-shot command given: drop into a full interactive session.
        code.interact(local=shell_locals)
        return

    # One-shot mode: compile and run the supplied command, then exit.
    compiled = code.compile_command(encoding.strfromlocal(cmd))
    code.InteractiveInterpreter(locals=shell_locals).runcode(compiled)
@command(
    b'debug-revlog-stats',
    [
        (b'c', b'changelog', None, _(b'Display changelog statistics')),
        (b'm', b'manifest', None, _(b'Display manifest statistics')),
        (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
    ]
    + cmdutil.formatteropts,
)
def debug_revlog_stats(ui, repo, **opts):
    """display statistics about revlogs in the store"""
    # Each selector is None when the flag was not given on the command line.
    # If none were given at all, report on everything.
    selected = [opts["changelog"], opts["manifest"], opts["filelogs"]]
    if all(flag is None for flag in selected):
        selected = [True, True, True]
    changelog, manifest, filelogs = selected

    # Work on the unfiltered repo so hidden revisions are counted too.
    repo = repo.unfiltered()
    fm = ui.formatter(b'debug-revlog-stats', pycompat.byteskwargs(opts))
    revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
    fm.end()
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closest
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # Shared across successorssets() invocations so work from one call to
    # another is not recomputed.
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in logcmdutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            # Each non-empty set is printed on its own indented line.
            if succsset:
                rendered = b' '.join(node2str(node) for node in succsset)
                ui.write(b'  %s' % rendered)
            ui.write(b'\n')
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')

    def describe(fnode):
        # Render a cached .hgtags filenode for display: hex when cached,
        # annotated when the filelog does not actually contain that node.
        if fnode:
            rendered = hex(fnode)
            if not flog.hasnode(fnode):
                rendered += b' (unknown node)'
            return rendered
        if fnode is None:
            return b'missing'
        return b'invalid'

    for rev in repo:
        node = repo[rev].node()
        fnode = cache.getfnode(node, computemissing=False)
        ui.write(b'%d %s %s\n' % (rev, hex(node), describe(fnode)))
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # --rev requires a repository; the command is registered with
        # optionalrepo=True, so repo may legitimately be None here.
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = logcmdutil.revrange(repo, opts['rev'])

    # Collect -D KEY=VALUE definitions into template properties.
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            # An empty key or the reserved name 'ui' is rejected by funneling
            # it through the same ValueError path as a missing '='.
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        # Show the parse tree, and the alias-expanded tree when expansion
        # actually changed something.
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # No --rev: render once as a generic template.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # --rev: treat the template as a log template and apply it to each
        # requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    # Substitute a placeholder when ui.getpass() yields no answer.
    response = b"<default response>" if response is None else response
    ui.writenoi18n(b'response: %s\n' % response)
@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    # Echo whatever ui.prompt() returns for the given prompt text.
    ui.writenoi18n(b'response: %s\n' % ui.prompt(prompt))
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Take both the working-copy lock and the store lock before warming.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(caches=repository.CACHES_ALL)
@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    # Delegate all work to the upgrade module; ``optimize`` is a repeatable
    # -o flag, so deduplicate it into a set before handing it over.
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
    )
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(matcher), b'\n')
    items = list(repo[None].walk(matcher))
    if not items:
        return

    # On platforms where the OS separator is not '/', optionally normalize
    # displayed relative paths when ui.slash is set.
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        display = util.normpath
    else:
        def display(fn):
            return fn

    # Column widths sized to the longest absolute and relative paths.
    abswidth = max(len(path) for path in items)
    relwidth = max(len(repo.pathto(path)) for path in items)
    fmt = b'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)

    for path in items:
        exact = b'exact' if matcher.exact(path) else b''
        line = fmt % (path, display(repo.pathto(path)), exact)
        ui.write(b"%s\n" % line.rstrip())
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        divergent = entry.get(b'divergentnodes')
        if divergent:
            # Render each divergent changeset as "<hex> (<phase>)", joined by
            # spaces, with a trailing space to separate it from the reason.
            rendered = b' '.join(
                b'%s (%s)' % (ctx.hex(), ctx.phasestr()) for ctx in divergent
            )
            dnodes = rendered + b' '
        else:
            dnodes = b''
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )
@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    """check argument passing through the wire protocol

    Issues the ``debugwireargs`` wire command against the peer at REPO and
    prints the result; warns if a second identical call returns something
    different.
    """
    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
    try:
        # Drop the generic remote options (ssh, remotecmd, ...) so that only
        # this command's own options remain in ``opts``.
        for opt in cmdutil.remoteopts:
            del opts[pycompat.sysstr(opt[1])]
        # Forward only options that were actually given a value.
        args = {}
        for k, v in opts.items():
            if v:
                args[k] = v

        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            # A mismatch indicates the protocol stream was corrupted by the
            # first call.
            ui.warn(b"%s\n" % res2)
    finally:
        # Always tear down the peer connection, even on error.
        repo.close()
4156 def _parsewirelangblocks(fh):
4156 def _parsewirelangblocks(fh):
4157 activeaction = None
4157 activeaction = None
4158 blocklines = []
4158 blocklines = []
4159 lastindent = 0
4159 lastindent = 0
4160
4160
4161 for line in fh:
4161 for line in fh:
4162 line = line.rstrip()
4162 line = line.rstrip()
4163 if not line:
4163 if not line:
4164 continue
4164 continue
4165
4165
4166 if line.startswith(b'#'):
4166 if line.startswith(b'#'):
4167 continue
4167 continue
4168
4168
4169 if not line.startswith(b' '):
4169 if not line.startswith(b' '):
4170 # New block. Flush previous one.
4170 # New block. Flush previous one.
4171 if activeaction:
4171 if activeaction:
4172 yield activeaction, blocklines
4172 yield activeaction, blocklines
4173
4173
4174 activeaction = line
4174 activeaction = line
4175 blocklines = []
4175 blocklines = []
4176 lastindent = 0
4176 lastindent = 0
4177 continue
4177 continue
4178
4178
4179 # Else we start with an indent.
4179 # Else we start with an indent.
4180
4180
4181 if not activeaction:
4181 if not activeaction:
4182 raise error.Abort(_(b'indented line outside of block'))
4182 raise error.Abort(_(b'indented line outside of block'))
4183
4183
4184 indent = len(line) - len(line.lstrip())
4184 indent = len(line) - len(line.lstrip())
4185
4185
4186 # If this line is indented more than the last line, concatenate it.
4186 # If this line is indented more than the last line, concatenate it.
4187 if indent > lastindent and blocklines:
4187 if indent > lastindent and blocklines:
4188 blocklines[-1] += line.lstrip()
4188 blocklines[-1] += line.lstrip()
4189 else:
4189 else:
4190 blocklines.append(line)
4190 blocklines.append(line)
4191 lastindent = indent
4191 lastindent = indent
4192
4192
4193 # Flush last block.
4193 # Flush last block.
4194 if activeaction:
4194 if activeaction:
4195 yield activeaction, blocklines
4195 yield activeaction, blocklines
4196
4196
4197
4197
4198 @command(
4198 @command(
4199 b'debugwireproto',
4199 b'debugwireproto',
4200 [
4200 [
4201 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4201 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4202 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4202 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4203 (
4203 (
4204 b'',
4204 b'',
4205 b'noreadstderr',
4205 b'noreadstderr',
4206 False,
4206 False,
4207 _(b'do not read from stderr of the remote'),
4207 _(b'do not read from stderr of the remote'),
4208 ),
4208 ),
4209 (
4209 (
4210 b'',
4210 b'',
4211 b'nologhandshake',
4211 b'nologhandshake',
4212 False,
4212 False,
4213 _(b'do not log I/O related to the peer handshake'),
4213 _(b'do not log I/O related to the peer handshake'),
4214 ),
4214 ),
4215 ]
4215 ]
4216 + cmdutil.remoteopts,
4216 + cmdutil.remoteopts,
4217 _(b'[PATH]'),
4217 _(b'[PATH]'),
4218 optionalrepo=True,
4218 optionalrepo=True,
4219 )
4219 )
4220 def debugwireproto(ui, repo, path=None, **opts):
4220 def debugwireproto(ui, repo, path=None, **opts):
4221 """send wire protocol commands to a server
4221 """send wire protocol commands to a server
4222
4222
4223 This command can be used to issue wire protocol commands to remote
4223 This command can be used to issue wire protocol commands to remote
4224 peers and to debug the raw data being exchanged.
4224 peers and to debug the raw data being exchanged.
4225
4225
4226 ``--localssh`` will start an SSH server against the current repository
4226 ``--localssh`` will start an SSH server against the current repository
4227 and connect to that. By default, the connection will perform a handshake
4227 and connect to that. By default, the connection will perform a handshake
4228 and establish an appropriate peer instance.
4228 and establish an appropriate peer instance.
4229
4229
4230 ``--peer`` can be used to bypass the handshake protocol and construct a
4230 ``--peer`` can be used to bypass the handshake protocol and construct a
4231 peer instance using the specified class type. Valid values are ``raw``,
4231 peer instance using the specified class type. Valid values are ``raw``,
4232 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4232 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4233 don't support higher-level command actions.
4233 don't support higher-level command actions.
4234
4234
4235 ``--noreadstderr`` can be used to disable automatic reading from stderr
4235 ``--noreadstderr`` can be used to disable automatic reading from stderr
4236 of the peer (for SSH connections only). Disabling automatic reading of
4236 of the peer (for SSH connections only). Disabling automatic reading of
4237 stderr is useful for making output more deterministic.
4237 stderr is useful for making output more deterministic.
4238
4238
4239 Commands are issued via a mini language which is specified via stdin.
4239 Commands are issued via a mini language which is specified via stdin.
4240 The language consists of individual actions to perform. An action is
4240 The language consists of individual actions to perform. An action is
4241 defined by a block. A block is defined as a line with no leading
4241 defined by a block. A block is defined as a line with no leading
4242 space followed by 0 or more lines with leading space. Blocks are
4242 space followed by 0 or more lines with leading space. Blocks are
4243 effectively a high-level command with additional metadata.
4243 effectively a high-level command with additional metadata.
4244
4244
4245 Lines beginning with ``#`` are ignored.
4245 Lines beginning with ``#`` are ignored.
4246
4246
4247 The following sections denote available actions.
4247 The following sections denote available actions.
4248
4248
4249 raw
4249 raw
4250 ---
4250 ---
4251
4251
4252 Send raw data to the server.
4252 Send raw data to the server.
4253
4253
4254 The block payload contains the raw data to send as one atomic send
4254 The block payload contains the raw data to send as one atomic send
4255 operation. The data may not actually be delivered in a single system
4255 operation. The data may not actually be delivered in a single system
4256 call: it depends on the abilities of the transport being used.
4256 call: it depends on the abilities of the transport being used.
4257
4257
4258 Each line in the block is de-indented and concatenated. Then, that
4258 Each line in the block is de-indented and concatenated. Then, that
4259 value is evaluated as a Python b'' literal. This allows the use of
4259 value is evaluated as a Python b'' literal. This allows the use of
4260 backslash escaping, etc.
4260 backslash escaping, etc.
4261
4261
4262 raw+
4262 raw+
4263 ----
4263 ----
4264
4264
4265 Behaves like ``raw`` except flushes output afterwards.
4265 Behaves like ``raw`` except flushes output afterwards.
4266
4266
4267 command <X>
4267 command <X>
4268 -----------
4268 -----------
4269
4269
4270 Send a request to run a named command, whose name follows the ``command``
4270 Send a request to run a named command, whose name follows the ``command``
4271 string.
4271 string.
4272
4272
4273 Arguments to the command are defined as lines in this block. The format of
4273 Arguments to the command are defined as lines in this block. The format of
4274 each line is ``<key> <value>``. e.g.::
4274 each line is ``<key> <value>``. e.g.::
4275
4275
4276 command listkeys
4276 command listkeys
4277 namespace bookmarks
4277 namespace bookmarks
4278
4278
4279 If the value begins with ``eval:``, it will be interpreted as a Python
4279 If the value begins with ``eval:``, it will be interpreted as a Python
4280 literal expression. Otherwise values are interpreted as Python b'' literals.
4280 literal expression. Otherwise values are interpreted as Python b'' literals.
4281 This allows sending complex types and encoding special byte sequences via
4281 This allows sending complex types and encoding special byte sequences via
4282 backslash escaping.
4282 backslash escaping.
4283
4283
4284 The following arguments have special meaning:
4284 The following arguments have special meaning:
4285
4285
4286 ``PUSHFILE``
4286 ``PUSHFILE``
4287 When defined, the *push* mechanism of the peer will be used instead
4287 When defined, the *push* mechanism of the peer will be used instead
4288 of the static request-response mechanism and the content of the
4288 of the static request-response mechanism and the content of the
4289 file specified in the value of this argument will be sent as the
4289 file specified in the value of this argument will be sent as the
4290 command payload.
4290 command payload.
4291
4291
4292 This can be used to submit a local bundle file to the remote.
4292 This can be used to submit a local bundle file to the remote.
4293
4293
4294 batchbegin
4294 batchbegin
4295 ----------
4295 ----------
4296
4296
4297 Instruct the peer to begin a batched send.
4297 Instruct the peer to begin a batched send.
4298
4298
4299 All ``command`` blocks are queued for execution until the next
4299 All ``command`` blocks are queued for execution until the next
4300 ``batchsubmit`` block.
4300 ``batchsubmit`` block.
4301
4301
4302 batchsubmit
4302 batchsubmit
4303 -----------
4303 -----------
4304
4304
4305 Submit previously queued ``command`` blocks as a batch request.
4305 Submit previously queued ``command`` blocks as a batch request.
4306
4306
4307 This action MUST be paired with a ``batchbegin`` action.
4307 This action MUST be paired with a ``batchbegin`` action.
4308
4308
4309 httprequest <method> <path>
4309 httprequest <method> <path>
4310 ---------------------------
4310 ---------------------------
4311
4311
4312 (HTTP peer only)
4312 (HTTP peer only)
4313
4313
4314 Send an HTTP request to the peer.
4314 Send an HTTP request to the peer.
4315
4315
4316 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4316 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4317
4317
4318 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4318 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4319 headers to add to the request. e.g. ``Accept: foo``.
4319 headers to add to the request. e.g. ``Accept: foo``.
4320
4320
4321 The following arguments are special:
4321 The following arguments are special:
4322
4322
4323 ``BODYFILE``
4323 ``BODYFILE``
4324 The content of the file defined as the value to this argument will be
4324 The content of the file defined as the value to this argument will be
4325 transferred verbatim as the HTTP request body.
4325 transferred verbatim as the HTTP request body.
4326
4326
4327 ``frame <type> <flags> <payload>``
4327 ``frame <type> <flags> <payload>``
4328 Send a unified protocol frame as part of the request body.
4328 Send a unified protocol frame as part of the request body.
4329
4329
4330 All frames will be collected and sent as the body to the HTTP
4330 All frames will be collected and sent as the body to the HTTP
4331 request.
4331 request.
4332
4332
4333 close
4333 close
4334 -----
4334 -----
4335
4335
4336 Close the connection to the server.
4336 Close the connection to the server.
4337
4337
4338 flush
4338 flush
4339 -----
4339 -----
4340
4340
4341 Flush data written to the server.
4341 Flush data written to the server.
4342
4342
4343 readavailable
4343 readavailable
4344 -------------
4344 -------------
4345
4345
4346 Close the write end of the connection and read all available data from
4346 Close the write end of the connection and read all available data from
4347 the server.
4347 the server.
4348
4348
4349 If the connection to the server encompasses multiple pipes, we poll both
4349 If the connection to the server encompasses multiple pipes, we poll both
4350 pipes and read available data.
4350 pipes and read available data.
4351
4351
4352 readline
4352 readline
4353 --------
4353 --------
4354
4354
4355 Read a line of output from the server. If there are multiple output
4355 Read a line of output from the server. If there are multiple output
4356 pipes, reads only the main pipe.
4356 pipes, reads only the main pipe.
4357
4357
4358 ereadline
4358 ereadline
4359 ---------
4359 ---------
4360
4360
4361 Like ``readline``, but read from the stderr pipe, if available.
4361 Like ``readline``, but read from the stderr pipe, if available.
4362
4362
4363 read <X>
4363 read <X>
4364 --------
4364 --------
4365
4365
4366 ``read()`` N bytes from the server's main output pipe.
4366 ``read()`` N bytes from the server's main output pipe.
4367
4367
4368 eread <X>
4368 eread <X>
4369 ---------
4369 ---------
4370
4370
4371 ``read()`` N bytes from the server's stderr pipe, if available.
4371 ``read()`` N bytes from the server's stderr pipe, if available.
4372
4372
4373 Specifying Unified Frame-Based Protocol Frames
4373 Specifying Unified Frame-Based Protocol Frames
4374 ----------------------------------------------
4374 ----------------------------------------------
4375
4375
4376 It is possible to emit a *Unified Frame-Based Protocol* by using special
4376 It is possible to emit a *Unified Frame-Based Protocol* by using special
4377 syntax.
4377 syntax.
4378
4378
4379 A frame is composed as a type, flags, and payload. These can be parsed
4379 A frame is composed as a type, flags, and payload. These can be parsed
4380 from a string of the form:
4380 from a string of the form:
4381
4381
4382 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4382 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4383
4383
4384 ``request-id`` and ``stream-id`` are integers defining the request and
4384 ``request-id`` and ``stream-id`` are integers defining the request and
4385 stream identifiers.
4385 stream identifiers.
4386
4386
4387 ``type`` can be an integer value for the frame type or the string name
4387 ``type`` can be an integer value for the frame type or the string name
4388 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4388 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4389 ``command-name``.
4389 ``command-name``.
4390
4390
4391 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4391 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4392 components. Each component (and there can be just one) can be an integer
4392 components. Each component (and there can be just one) can be an integer
4393 or a flag name for stream flags or frame flags, respectively. Values are
4393 or a flag name for stream flags or frame flags, respectively. Values are
4394 resolved to integers and then bitwise OR'd together.
4394 resolved to integers and then bitwise OR'd together.
4395
4395
4396 ``payload`` represents the raw frame payload. If it begins with
4396 ``payload`` represents the raw frame payload. If it begins with
4397 ``cbor:``, the following string is evaluated as Python code and the
4397 ``cbor:``, the following string is evaluated as Python code and the
4398 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4398 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4399 as a Python byte string literal.
4399 as a Python byte string literal.
4400 """
4400 """
4401 if opts['localssh'] and not repo:
4401 if opts['localssh'] and not repo:
4402 raise error.Abort(_(b'--localssh requires a repository'))
4402 raise error.Abort(_(b'--localssh requires a repository'))
4403
4403
4404 if opts['peer'] and opts['peer'] not in (
4404 if opts['peer'] and opts['peer'] not in (
4405 b'raw',
4405 b'raw',
4406 b'ssh1',
4406 b'ssh1',
4407 ):
4407 ):
4408 raise error.Abort(
4408 raise error.Abort(
4409 _(b'invalid value for --peer'),
4409 _(b'invalid value for --peer'),
4410 hint=_(b'valid values are "raw" and "ssh1"'),
4410 hint=_(b'valid values are "raw" and "ssh1"'),
4411 )
4411 )
4412
4412
4413 if path and opts['localssh']:
4413 if path and opts['localssh']:
4414 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4414 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4415
4415
4416 if ui.interactive():
4416 if ui.interactive():
4417 ui.write(_(b'(waiting for commands on stdin)\n'))
4417 ui.write(_(b'(waiting for commands on stdin)\n'))
4418
4418
4419 blocks = list(_parsewirelangblocks(ui.fin))
4419 blocks = list(_parsewirelangblocks(ui.fin))
4420
4420
4421 proc = None
4421 proc = None
4422 stdin = None
4422 stdin = None
4423 stdout = None
4423 stdout = None
4424 stderr = None
4424 stderr = None
4425 opener = None
4425 opener = None
4426
4426
4427 if opts['localssh']:
4427 if opts['localssh']:
4428 # We start the SSH server in its own process so there is process
4428 # We start the SSH server in its own process so there is process
4429 # separation. This prevents a whole class of potential bugs around
4429 # separation. This prevents a whole class of potential bugs around
4430 # shared state from interfering with server operation.
4430 # shared state from interfering with server operation.
4431 args = procutil.hgcmd() + [
4431 args = procutil.hgcmd() + [
4432 b'-R',
4432 b'-R',
4433 repo.root,
4433 repo.root,
4434 b'debugserve',
4434 b'debugserve',
4435 b'--sshstdio',
4435 b'--sshstdio',
4436 ]
4436 ]
4437 proc = subprocess.Popen(
4437 proc = subprocess.Popen(
4438 pycompat.rapply(procutil.tonativestr, args),
4438 pycompat.rapply(procutil.tonativestr, args),
4439 stdin=subprocess.PIPE,
4439 stdin=subprocess.PIPE,
4440 stdout=subprocess.PIPE,
4440 stdout=subprocess.PIPE,
4441 stderr=subprocess.PIPE,
4441 stderr=subprocess.PIPE,
4442 bufsize=0,
4442 bufsize=0,
4443 )
4443 )
4444
4444
4445 stdin = proc.stdin
4445 stdin = proc.stdin
4446 stdout = proc.stdout
4446 stdout = proc.stdout
4447 stderr = proc.stderr
4447 stderr = proc.stderr
4448
4448
4449 # We turn the pipes into observers so we can log I/O.
4449 # We turn the pipes into observers so we can log I/O.
4450 if ui.verbose or opts['peer'] == b'raw':
4450 if ui.verbose or opts['peer'] == b'raw':
4451 stdin = util.makeloggingfileobject(
4451 stdin = util.makeloggingfileobject(
4452 ui, proc.stdin, b'i', logdata=True
4452 ui, proc.stdin, b'i', logdata=True
4453 )
4453 )
4454 stdout = util.makeloggingfileobject(
4454 stdout = util.makeloggingfileobject(
4455 ui, proc.stdout, b'o', logdata=True
4455 ui, proc.stdout, b'o', logdata=True
4456 )
4456 )
4457 stderr = util.makeloggingfileobject(
4457 stderr = util.makeloggingfileobject(
4458 ui, proc.stderr, b'e', logdata=True
4458 ui, proc.stderr, b'e', logdata=True
4459 )
4459 )
4460
4460
4461 # --localssh also implies the peer connection settings.
4461 # --localssh also implies the peer connection settings.
4462
4462
4463 url = b'ssh://localserver'
4463 url = b'ssh://localserver'
4464 autoreadstderr = not opts['noreadstderr']
4464 autoreadstderr = not opts['noreadstderr']
4465
4465
4466 if opts['peer'] == b'ssh1':
4466 if opts['peer'] == b'ssh1':
4467 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4467 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4468 peer = sshpeer.sshv1peer(
4468 peer = sshpeer.sshv1peer(
4469 ui,
4469 ui,
4470 url,
4470 url,
4471 proc,
4471 proc,
4472 stdin,
4472 stdin,
4473 stdout,
4473 stdout,
4474 stderr,
4474 stderr,
4475 None,
4475 None,
4476 autoreadstderr=autoreadstderr,
4476 autoreadstderr=autoreadstderr,
4477 )
4477 )
4478 elif opts['peer'] == b'raw':
4478 elif opts['peer'] == b'raw':
4479 ui.write(_(b'using raw connection to peer\n'))
4479 ui.write(_(b'using raw connection to peer\n'))
4480 peer = None
4480 peer = None
4481 else:
4481 else:
4482 ui.write(_(b'creating ssh peer from handshake results\n'))
4482 ui.write(_(b'creating ssh peer from handshake results\n'))
4483 peer = sshpeer._make_peer(
4483 peer = sshpeer._make_peer(
4484 ui,
4484 ui,
4485 url,
4485 url,
4486 proc,
4486 proc,
4487 stdin,
4487 stdin,
4488 stdout,
4488 stdout,
4489 stderr,
4489 stderr,
4490 autoreadstderr=autoreadstderr,
4490 autoreadstderr=autoreadstderr,
4491 )
4491 )
4492
4492
4493 elif path:
4493 elif path:
4494 # We bypass hg.peer() so we can proxy the sockets.
4494 # We bypass hg.peer() so we can proxy the sockets.
4495 # TODO consider not doing this because we skip
4495 # TODO consider not doing this because we skip
4496 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4496 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4497 u = urlutil.url(path)
4497 u = urlutil.url(path)
4498 if u.scheme != b'http':
4498 if u.scheme != b'http':
4499 raise error.Abort(_(b'only http:// paths are currently supported'))
4499 raise error.Abort(_(b'only http:// paths are currently supported'))
4500
4500
4501 url, authinfo = u.authinfo()
4501 url, authinfo = u.authinfo()
4502 openerargs = {
4502 openerargs = {
4503 'useragent': b'Mercurial debugwireproto',
4503 'useragent': b'Mercurial debugwireproto',
4504 }
4504 }
4505
4505
4506 # Turn pipes/sockets into observers so we can log I/O.
4506 # Turn pipes/sockets into observers so we can log I/O.
4507 if ui.verbose:
4507 if ui.verbose:
4508 openerargs.update(
4508 openerargs.update(
4509 {
4509 {
4510 'loggingfh': ui,
4510 'loggingfh': ui,
4511 'loggingname': b's',
4511 'loggingname': b's',
4512 'loggingopts': {
4512 'loggingopts': {
4513 'logdata': True,
4513 'logdata': True,
4514 'logdataapis': False,
4514 'logdataapis': False,
4515 },
4515 },
4516 }
4516 }
4517 )
4517 )
4518
4518
4519 if ui.debugflag:
4519 if ui.debugflag:
4520 openerargs['loggingopts']['logdataapis'] = True
4520 openerargs['loggingopts']['logdataapis'] = True
4521
4521
4522 # Don't send default headers when in raw mode. This allows us to
4522 # Don't send default headers when in raw mode. This allows us to
4523 # bypass most of the behavior of our URL handling code so we can
4523 # bypass most of the behavior of our URL handling code so we can
4524 # have near complete control over what's sent on the wire.
4524 # have near complete control over what's sent on the wire.
4525 if opts['peer'] == b'raw':
4525 if opts['peer'] == b'raw':
4526 openerargs['sendaccept'] = False
4526 openerargs['sendaccept'] = False
4527
4527
4528 opener = urlmod.opener(ui, authinfo, **openerargs)
4528 opener = urlmod.opener(ui, authinfo, **openerargs)
4529
4529
4530 if opts['peer'] == b'raw':
4530 if opts['peer'] == b'raw':
4531 ui.write(_(b'using raw connection to peer\n'))
4531 ui.write(_(b'using raw connection to peer\n'))
4532 peer = None
4532 peer = None
4533 elif opts['peer']:
4533 elif opts['peer']:
4534 raise error.Abort(
4534 raise error.Abort(
4535 _(b'--peer %s not supported with HTTP peers') % opts['peer']
4535 _(b'--peer %s not supported with HTTP peers') % opts['peer']
4536 )
4536 )
4537 else:
4537 else:
4538 peer_path = urlutil.try_path(ui, path)
4538 peer_path = urlutil.try_path(ui, path)
4539 peer = httppeer._make_peer(ui, peer_path, opener=opener)
4539 peer = httppeer._make_peer(ui, peer_path, opener=opener)
4540
4540
4541 # We /could/ populate stdin/stdout with sock.makefile()...
4541 # We /could/ populate stdin/stdout with sock.makefile()...
4542 else:
4542 else:
4543 raise error.Abort(_(b'unsupported connection configuration'))
4543 raise error.Abort(_(b'unsupported connection configuration'))
4544
4544
4545 batchedcommands = None
4545 batchedcommands = None
4546
4546
4547 # Now perform actions based on the parsed wire language instructions.
4547 # Now perform actions based on the parsed wire language instructions.
4548 for action, lines in blocks:
4548 for action, lines in blocks:
4549 if action in (b'raw', b'raw+'):
4549 if action in (b'raw', b'raw+'):
4550 if not stdin:
4550 if not stdin:
4551 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4551 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4552
4552
4553 # Concatenate the data together.
4553 # Concatenate the data together.
4554 data = b''.join(l.lstrip() for l in lines)
4554 data = b''.join(l.lstrip() for l in lines)
4555 data = stringutil.unescapestr(data)
4555 data = stringutil.unescapestr(data)
4556 stdin.write(data)
4556 stdin.write(data)
4557
4557
4558 if action == b'raw+':
4558 if action == b'raw+':
4559 stdin.flush()
4559 stdin.flush()
4560 elif action == b'flush':
4560 elif action == b'flush':
4561 if not stdin:
4561 if not stdin:
4562 raise error.Abort(_(b'cannot call flush on this peer'))
4562 raise error.Abort(_(b'cannot call flush on this peer'))
4563 stdin.flush()
4563 stdin.flush()
4564 elif action.startswith(b'command'):
4564 elif action.startswith(b'command'):
4565 if not peer:
4565 if not peer:
4566 raise error.Abort(
4566 raise error.Abort(
4567 _(
4567 _(
4568 b'cannot send commands unless peer instance '
4568 b'cannot send commands unless peer instance '
4569 b'is available'
4569 b'is available'
4570 )
4570 )
4571 )
4571 )
4572
4572
4573 command = action.split(b' ', 1)[1]
4573 command = action.split(b' ', 1)[1]
4574
4574
4575 args = {}
4575 args = {}
4576 for line in lines:
4576 for line in lines:
4577 # We need to allow empty values.
4577 # We need to allow empty values.
4578 fields = line.lstrip().split(b' ', 1)
4578 fields = line.lstrip().split(b' ', 1)
4579 if len(fields) == 1:
4579 if len(fields) == 1:
4580 key = fields[0]
4580 key = fields[0]
4581 value = b''
4581 value = b''
4582 else:
4582 else:
4583 key, value = fields
4583 key, value = fields
4584
4584
4585 if value.startswith(b'eval:'):
4585 if value.startswith(b'eval:'):
4586 value = stringutil.evalpythonliteral(value[5:])
4586 value = stringutil.evalpythonliteral(value[5:])
4587 else:
4587 else:
4588 value = stringutil.unescapestr(value)
4588 value = stringutil.unescapestr(value)
4589
4589
4590 args[key] = value
4590 args[key] = value
4591
4591
4592 if batchedcommands is not None:
4592 if batchedcommands is not None:
4593 batchedcommands.append((command, args))
4593 batchedcommands.append((command, args))
4594 continue
4594 continue
4595
4595
4596 ui.status(_(b'sending %s command\n') % command)
4596 ui.status(_(b'sending %s command\n') % command)
4597
4597
4598 if b'PUSHFILE' in args:
4598 if b'PUSHFILE' in args:
4599 with open(args[b'PUSHFILE'], 'rb') as fh:
4599 with open(args[b'PUSHFILE'], 'rb') as fh:
4600 del args[b'PUSHFILE']
4600 del args[b'PUSHFILE']
4601 res, output = peer._callpush(
4601 res, output = peer._callpush(
4602 command, fh, **pycompat.strkwargs(args)
4602 command, fh, **pycompat.strkwargs(args)
4603 )
4603 )
4604 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4604 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4605 ui.status(
4605 ui.status(
4606 _(b'remote output: %s\n') % stringutil.escapestr(output)
4606 _(b'remote output: %s\n') % stringutil.escapestr(output)
4607 )
4607 )
4608 else:
4608 else:
4609 with peer.commandexecutor() as e:
4609 with peer.commandexecutor() as e:
4610 res = e.callcommand(command, args).result()
4610 res = e.callcommand(command, args).result()
4611
4611
4612 ui.status(
4612 ui.status(
4613 _(b'response: %s\n')
4613 _(b'response: %s\n')
4614 % stringutil.pprint(res, bprefix=True, indent=2)
4614 % stringutil.pprint(res, bprefix=True, indent=2)
4615 )
4615 )
4616
4616
4617 elif action == b'batchbegin':
4617 elif action == b'batchbegin':
4618 if batchedcommands is not None:
4618 if batchedcommands is not None:
4619 raise error.Abort(_(b'nested batchbegin not allowed'))
4619 raise error.Abort(_(b'nested batchbegin not allowed'))
4620
4620
4621 batchedcommands = []
4621 batchedcommands = []
4622 elif action == b'batchsubmit':
4622 elif action == b'batchsubmit':
4623 # There is a batching API we could go through. But it would be
4623 # There is a batching API we could go through. But it would be
4624 # difficult to normalize requests into function calls. It is easier
4624 # difficult to normalize requests into function calls. It is easier
4625 # to bypass this layer and normalize to commands + args.
4625 # to bypass this layer and normalize to commands + args.
4626 ui.status(
4626 ui.status(
4627 _(b'sending batch with %d sub-commands\n')
4627 _(b'sending batch with %d sub-commands\n')
4628 % len(batchedcommands)
4628 % len(batchedcommands)
4629 )
4629 )
4630 assert peer is not None
4630 assert peer is not None
4631 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4631 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4632 ui.status(
4632 ui.status(
4633 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4633 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4634 )
4634 )
4635
4635
4636 batchedcommands = None
4636 batchedcommands = None
4637
4637
4638 elif action.startswith(b'httprequest '):
4638 elif action.startswith(b'httprequest '):
4639 if not opener:
4639 if not opener:
4640 raise error.Abort(
4640 raise error.Abort(
4641 _(b'cannot use httprequest without an HTTP peer')
4641 _(b'cannot use httprequest without an HTTP peer')
4642 )
4642 )
4643
4643
4644 request = action.split(b' ', 2)
4644 request = action.split(b' ', 2)
4645 if len(request) != 3:
4645 if len(request) != 3:
4646 raise error.Abort(
4646 raise error.Abort(
4647 _(
4647 _(
4648 b'invalid httprequest: expected format is '
4648 b'invalid httprequest: expected format is '
4649 b'"httprequest <method> <path>'
4649 b'"httprequest <method> <path>'
4650 )
4650 )
4651 )
4651 )
4652
4652
4653 method, httppath = request[1:]
4653 method, httppath = request[1:]
4654 headers = {}
4654 headers = {}
4655 body = None
4655 body = None
4656 frames = []
4656 frames = []
4657 for line in lines:
4657 for line in lines:
4658 line = line.lstrip()
4658 line = line.lstrip()
4659 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4659 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4660 if m:
4660 if m:
4661 # Headers need to use native strings.
4661 # Headers need to use native strings.
4662 key = pycompat.strurl(m.group(1))
4662 key = pycompat.strurl(m.group(1))
4663 value = pycompat.strurl(m.group(2))
4663 value = pycompat.strurl(m.group(2))
4664 headers[key] = value
4664 headers[key] = value
4665 continue
4665 continue
4666
4666
4667 if line.startswith(b'BODYFILE '):
4667 if line.startswith(b'BODYFILE '):
4668 with open(line.split(b' ', 1), b'rb') as fh:
4668 with open(line.split(b' ', 1), b'rb') as fh:
4669 body = fh.read()
4669 body = fh.read()
4670 elif line.startswith(b'frame '):
4670 elif line.startswith(b'frame '):
4671 frame = wireprotoframing.makeframefromhumanstring(
4671 frame = wireprotoframing.makeframefromhumanstring(
4672 line[len(b'frame ') :]
4672 line[len(b'frame ') :]
4673 )
4673 )
4674
4674
4675 frames.append(frame)
4675 frames.append(frame)
4676 else:
4676 else:
4677 raise error.Abort(
4677 raise error.Abort(
4678 _(b'unknown argument to httprequest: %s') % line
4678 _(b'unknown argument to httprequest: %s') % line
4679 )
4679 )
4680
4680
4681 url = path + httppath
4681 url = path + httppath
4682
4682
4683 if frames:
4683 if frames:
4684 body = b''.join(bytes(f) for f in frames)
4684 body = b''.join(bytes(f) for f in frames)
4685
4685
4686 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4686 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4687
4687
4688 # urllib.Request insists on using has_data() as a proxy for
4688 # urllib.Request insists on using has_data() as a proxy for
4689 # determining the request method. Override that to use our
4689 # determining the request method. Override that to use our
4690 # explicitly requested method.
4690 # explicitly requested method.
4691 req.get_method = lambda: pycompat.sysstr(method)
4691 req.get_method = lambda: pycompat.sysstr(method)
4692
4692
4693 try:
4693 try:
4694 res = opener.open(req)
4694 res = opener.open(req)
4695 body = res.read()
4695 body = res.read()
4696 except util.urlerr.urlerror as e:
4696 except util.urlerr.urlerror as e:
4697 # read() method must be called, but only exists in Python 2
4697 # read() method must be called, but only exists in Python 2
4698 getattr(e, 'read', lambda: None)()
4698 getattr(e, 'read', lambda: None)()
4699 continue
4699 continue
4700
4700
4701 ct = res.headers.get('Content-Type')
4701 ct = res.headers.get('Content-Type')
4702 if ct == 'application/mercurial-cbor':
4702 if ct == 'application/mercurial-cbor':
4703 ui.write(
4703 ui.write(
4704 _(b'cbor> %s\n')
4704 _(b'cbor> %s\n')
4705 % stringutil.pprint(
4705 % stringutil.pprint(
4706 cborutil.decodeall(body), bprefix=True, indent=2
4706 cborutil.decodeall(body), bprefix=True, indent=2
4707 )
4707 )
4708 )
4708 )
4709
4709
4710 elif action == b'close':
4710 elif action == b'close':
4711 assert peer is not None
4711 assert peer is not None
4712 peer.close()
4712 peer.close()
4713 elif action == b'readavailable':
4713 elif action == b'readavailable':
4714 if not stdout or not stderr:
4714 if not stdout or not stderr:
4715 raise error.Abort(
4715 raise error.Abort(
4716 _(b'readavailable not available on this peer')
4716 _(b'readavailable not available on this peer')
4717 )
4717 )
4718
4718
4719 stdin.close()
4719 stdin.close()
4720 stdout.read()
4720 stdout.read()
4721 stderr.read()
4721 stderr.read()
4722
4722
4723 elif action == b'readline':
4723 elif action == b'readline':
4724 if not stdout:
4724 if not stdout:
4725 raise error.Abort(_(b'readline not available on this peer'))
4725 raise error.Abort(_(b'readline not available on this peer'))
4726 stdout.readline()
4726 stdout.readline()
4727 elif action == b'ereadline':
4727 elif action == b'ereadline':
4728 if not stderr:
4728 if not stderr:
4729 raise error.Abort(_(b'ereadline not available on this peer'))
4729 raise error.Abort(_(b'ereadline not available on this peer'))
4730 stderr.readline()
4730 stderr.readline()
4731 elif action.startswith(b'read '):
4731 elif action.startswith(b'read '):
4732 count = int(action.split(b' ', 1)[1])
4732 count = int(action.split(b' ', 1)[1])
4733 if not stdout:
4733 if not stdout:
4734 raise error.Abort(_(b'read not available on this peer'))
4734 raise error.Abort(_(b'read not available on this peer'))
4735 stdout.read(count)
4735 stdout.read(count)
4736 elif action.startswith(b'eread '):
4736 elif action.startswith(b'eread '):
4737 count = int(action.split(b' ', 1)[1])
4737 count = int(action.split(b' ', 1)[1])
4738 if not stderr:
4738 if not stderr:
4739 raise error.Abort(_(b'eread not available on this peer'))
4739 raise error.Abort(_(b'eread not available on this peer'))
4740 stderr.read(count)
4740 stderr.read(count)
4741 else:
4741 else:
4742 raise error.Abort(_(b'unknown action: %s') % action)
4742 raise error.Abort(_(b'unknown action: %s') % action)
4743
4743
4744 if batchedcommands is not None:
4744 if batchedcommands is not None:
4745 raise error.Abort(_(b'unclosed "batchbegin" request'))
4745 raise error.Abort(_(b'unclosed "batchbegin" request'))
4746
4746
4747 if peer:
4747 if peer:
4748 peer.close()
4748 peer.close()
4749
4749
4750 if proc:
4750 if proc:
4751 proc.kill()
4751 proc.kill()
@@ -1,1163 +1,1158 b''
1 // revlog.rs
1 // revlog.rs
2 //
2 //
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::{
8 use crate::{
9 cindex,
9 cindex,
10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
11 utils::{node_from_py_bytes, node_from_py_object},
11 utils::{node_from_py_bytes, node_from_py_object},
12 PyRevision,
12 PyRevision,
13 };
13 };
14 use cpython::{
14 use cpython::{
15 buffer::{Element, PyBuffer},
15 buffer::{Element, PyBuffer},
16 exc::{IndexError, ValueError},
16 exc::{IndexError, ValueError},
17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
19 PythonObject, ToPyObject,
19 PythonObject, ToPyObject,
20 };
20 };
21 use hg::{
21 use hg::{
22 errors::HgError,
22 errors::HgError,
23 index::{
23 index::{
24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
25 INDEX_ENTRY_SIZE,
25 INDEX_ENTRY_SIZE,
26 },
26 },
27 nodemap::{Block, NodeMapError, NodeTree},
27 nodemap::{Block, NodeMapError, NodeTree},
28 revlog::{nodemap::NodeMap, NodePrefix, RevlogError, RevlogIndex},
28 revlog::{nodemap::NodeMap, NodePrefix, RevlogError, RevlogIndex},
29 BaseRevision, Revision, UncheckedRevision, NULL_REVISION,
29 BaseRevision, Revision, UncheckedRevision, NULL_REVISION,
30 };
30 };
31 use std::{cell::RefCell, collections::HashMap};
31 use std::{cell::RefCell, collections::HashMap};
32
32
33 /// Return a Struct implementing the Graph trait
33 /// Return a Struct implementing the Graph trait
34 pub(crate) fn pyindex_to_graph(
34 pub(crate) fn pyindex_to_graph(
35 py: Python,
35 py: Python,
36 index: PyObject,
36 index: PyObject,
37 ) -> PyResult<cindex::Index> {
37 ) -> PyResult<cindex::Index> {
38 match index.extract::<MixedIndex>(py) {
38 match index.extract::<MixedIndex>(py) {
39 Ok(midx) => Ok(midx.clone_cindex(py)),
39 Ok(midx) => Ok(midx.clone_cindex(py)),
40 Err(_) => cindex::Index::new(py, index),
40 Err(_) => cindex::Index::new(py, index),
41 }
41 }
42 }
42 }
43
43
44 py_class!(pub class MixedIndex |py| {
44 py_class!(pub class MixedIndex |py| {
45 data cindex: RefCell<cindex::Index>;
45 data cindex: RefCell<cindex::Index>;
46 data index: RefCell<hg::index::Index>;
46 data index: RefCell<hg::index::Index>;
47 data nt: RefCell<Option<NodeTree>>;
47 data nt: RefCell<Option<NodeTree>>;
48 data docket: RefCell<Option<PyObject>>;
48 data docket: RefCell<Option<PyObject>>;
49 // Holds a reference to the mmap'ed persistent nodemap data
49 // Holds a reference to the mmap'ed persistent nodemap data
50 data nodemap_mmap: RefCell<Option<PyBuffer>>;
50 data nodemap_mmap: RefCell<Option<PyBuffer>>;
51 // Holds a reference to the mmap'ed persistent index data
51 // Holds a reference to the mmap'ed persistent index data
52 data index_mmap: RefCell<Option<PyBuffer>>;
52 data index_mmap: RefCell<Option<PyBuffer>>;
53
53
54 def __new__(
54 def __new__(
55 _cls,
55 _cls,
56 cindex: PyObject,
56 cindex: PyObject,
57 data: PyObject,
57 data: PyObject,
58 default_header: u32,
58 default_header: u32,
59 ) -> PyResult<MixedIndex> {
59 ) -> PyResult<MixedIndex> {
60 Self::new(py, cindex, data, default_header)
60 Self::new(py, cindex, data, default_header)
61 }
61 }
62
62
63 /// Compatibility layer used for Python consumers needing access to the C index
63 /// Compatibility layer used for Python consumers needing access to the C index
64 ///
64 ///
65 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
65 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
66 /// that may need to build a custom `nodetree`, based on a specified revset.
66 /// that may need to build a custom `nodetree`, based on a specified revset.
67 /// With a Rust implementation of the nodemap, we will be able to get rid of
67 /// With a Rust implementation of the nodemap, we will be able to get rid of
68 /// this, by exposing our own standalone nodemap class,
68 /// this, by exposing our own standalone nodemap class,
69 /// ready to accept `MixedIndex`.
69 /// ready to accept `MixedIndex`.
70 def get_cindex(&self) -> PyResult<PyObject> {
70 def get_cindex(&self) -> PyResult<PyObject> {
71 Ok(self.cindex(py).borrow().inner().clone_ref(py))
71 Ok(self.cindex(py).borrow().inner().clone_ref(py))
72 }
72 }
73
73
74 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
74 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
75
75
76 /// Return Revision if found, raises a bare `error.RevlogError`
76 /// Return Revision if found, raises a bare `error.RevlogError`
77 /// in case of ambiguity, same as C version does
77 /// in case of ambiguity, same as C version does
78 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
78 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
79 let opt = self.get_nodetree(py)?.borrow();
79 let opt = self.get_nodetree(py)?.borrow();
80 let nt = opt.as_ref().unwrap();
80 let nt = opt.as_ref().unwrap();
81 let idx = &*self.cindex(py).borrow();
81 let idx = &*self.cindex(py).borrow();
82 let ridx = &*self.index(py).borrow();
82 let ridx = &*self.index(py).borrow();
83 let node = node_from_py_bytes(py, &node)?;
83 let node = node_from_py_bytes(py, &node)?;
84 let rust_rev =
84 let rust_rev =
85 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
85 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
86 let c_rev =
86 let c_rev =
87 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
87 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
88 assert_eq!(rust_rev, c_rev);
88 assert_eq!(rust_rev, c_rev);
89 Ok(rust_rev.map(Into::into))
89 Ok(rust_rev.map(Into::into))
90
90
91 }
91 }
92
92
93 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
93 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
94 /// is not found.
94 /// is not found.
95 ///
95 ///
96 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
96 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
97 /// will catch and rewrap with it
97 /// will catch and rewrap with it
98 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
98 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
99 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
99 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
100 }
100 }
101
101
102 /// return True if the node exist in the index
102 /// return True if the node exist in the index
103 def has_node(&self, node: PyBytes) -> PyResult<bool> {
103 def has_node(&self, node: PyBytes) -> PyResult<bool> {
104 // TODO OPTIM we could avoid a needless conversion here,
104 // TODO OPTIM we could avoid a needless conversion here,
105 // to do when scaffolding for pure Rust switch is removed,
105 // to do when scaffolding for pure Rust switch is removed,
106 // as `get_rev()` currently does the necessary assertions
106 // as `get_rev()` currently does the necessary assertions
107 self.get_rev(py, node).map(|opt| opt.is_some())
107 self.get_rev(py, node).map(|opt| opt.is_some())
108 }
108 }
109
109
110 /// find length of shortest hex nodeid of a binary ID
110 /// find length of shortest hex nodeid of a binary ID
111 def shortest(&self, node: PyBytes) -> PyResult<usize> {
111 def shortest(&self, node: PyBytes) -> PyResult<usize> {
112 let opt = self.get_nodetree(py)?.borrow();
112 let opt = self.get_nodetree(py)?.borrow();
113 let nt = opt.as_ref().unwrap();
113 let nt = opt.as_ref().unwrap();
114 let idx = &*self.index(py).borrow();
114 let idx = &*self.index(py).borrow();
115 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
115 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
116 {
116 {
117 Ok(Some(l)) => Ok(l),
117 Ok(Some(l)) => Ok(l),
118 Ok(None) => Err(revlog_error(py)),
118 Ok(None) => Err(revlog_error(py)),
119 Err(e) => Err(nodemap_error(py, e)),
119 Err(e) => Err(nodemap_error(py, e)),
120 }
120 }
121 }
121 }
122
122
123 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
123 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
124 let opt = self.get_nodetree(py)?.borrow();
124 let opt = self.get_nodetree(py)?.borrow();
125 let nt = opt.as_ref().unwrap();
125 let nt = opt.as_ref().unwrap();
126 let idx = &*self.index(py).borrow();
126 let idx = &*self.index(py).borrow();
127
127
128 let node_as_string = if cfg!(feature = "python3-sys") {
128 let node_as_string = if cfg!(feature = "python3-sys") {
129 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
129 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
130 }
130 }
131 else {
131 else {
132 let node = node.extract::<PyBytes>(py)?;
132 let node = node.extract::<PyBytes>(py)?;
133 String::from_utf8_lossy(node.data(py)).to_string()
133 String::from_utf8_lossy(node.data(py)).to_string()
134 };
134 };
135
135
136 let prefix = NodePrefix::from_hex(&node_as_string)
136 let prefix = NodePrefix::from_hex(&node_as_string)
137 .map_err(|_| PyErr::new::<ValueError, _>(
137 .map_err(|_| PyErr::new::<ValueError, _>(
138 py, format!("Invalid node or prefix '{}'", node_as_string))
138 py, format!("Invalid node or prefix '{}'", node_as_string))
139 )?;
139 )?;
140
140
141 nt.find_bin(idx, prefix)
141 nt.find_bin(idx, prefix)
142 // TODO make an inner API returning the node directly
142 // TODO make an inner API returning the node directly
143 .map(|opt| opt.map(
143 .map(|opt| opt.map(
144 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
144 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
145 .map_err(|e| nodemap_error(py, e))
145 .map_err(|e| nodemap_error(py, e))
146
146
147 }
147 }
148
148
149 /// append an index entry
149 /// append an index entry
150 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
150 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
151 if tup.len(py) < 8 {
151 if tup.len(py) < 8 {
152 // this is better than the panic promised by tup.get_item()
152 // this is better than the panic promised by tup.get_item()
153 return Err(
153 return Err(
154 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
154 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
155 }
155 }
156 let node_bytes = tup.get_item(py, 7).extract(py)?;
156 let node_bytes = tup.get_item(py, 7).extract(py)?;
157 let node = node_from_py_object(py, &node_bytes)?;
157 let node = node_from_py_object(py, &node_bytes)?;
158
158
159 let rev = self.len(py)? as BaseRevision;
159 let rev = self.len(py)? as BaseRevision;
160 let mut idx = self.cindex(py).borrow_mut();
160 let mut idx = self.cindex(py).borrow_mut();
161
161
162 // This is ok since we will just add the revision to the index
162 // This is ok since we will just add the revision to the index
163 let rev = Revision(rev);
163 let rev = Revision(rev);
164 idx.append(py, tup.clone_ref(py))?;
164 idx.append(py, tup.clone_ref(py))?;
165 self.index(py)
165 self.index(py)
166 .borrow_mut()
166 .borrow_mut()
167 .append(py_tuple_to_revision_data_params(py, tup)?)
167 .append(py_tuple_to_revision_data_params(py, tup)?)
168 .unwrap();
168 .unwrap();
169 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
169 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
170 .insert(&*idx, &node, rev)
170 .insert(&*idx, &node, rev)
171 .map_err(|e| nodemap_error(py, e))?;
171 .map_err(|e| nodemap_error(py, e))?;
172 Ok(py.None())
172 Ok(py.None())
173 }
173 }
174
174
175 def __delitem__(&self, key: PyObject) -> PyResult<()> {
175 def __delitem__(&self, key: PyObject) -> PyResult<()> {
176 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
176 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
177 self.cindex(py).borrow().inner().del_item(py, &key)?;
177 self.cindex(py).borrow().inner().del_item(py, &key)?;
178 let start = key.getattr(py, "start")?;
178 let start = key.getattr(py, "start")?;
179 let start = UncheckedRevision(start.extract(py)?);
179 let start = UncheckedRevision(start.extract(py)?);
180 let start = self.index(py)
180 let start = self.index(py)
181 .borrow()
181 .borrow()
182 .check_revision(start)
182 .check_revision(start)
183 .ok_or_else(|| {
183 .ok_or_else(|| {
184 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
184 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
185 })?;
185 })?;
186 self.index(py).borrow_mut().remove(start).unwrap();
186 self.index(py).borrow_mut().remove(start).unwrap();
187 let mut opt = self.get_nodetree(py)?.borrow_mut();
187 let mut opt = self.get_nodetree(py)?.borrow_mut();
188 let nt = opt.as_mut().unwrap();
188 let nt = opt.as_mut().unwrap();
189 nt.invalidate_all();
189 nt.invalidate_all();
190 self.fill_nodemap(py, nt)?;
190 self.fill_nodemap(py, nt)?;
191 Ok(())
191 Ok(())
192 }
192 }
193
193
194 //
194 //
195 // Reforwarded C index API
195 // Reforwarded C index API
196 //
196 //
197
197
198 // index_methods (tp_methods). Same ordering as in revlog.c
198 // index_methods (tp_methods). Same ordering as in revlog.c
199
199
200 /// return the gca set of the given revs
200 /// return the gca set of the given revs
201 def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
201 def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
202 let rust_res = self.inner_ancestors(py, args)?;
202 let rust_res = self.inner_ancestors(py, args)?;
203
203
204 let c_res = self.call_cindex(py, "ancestors", args, kw)?;
204 let c_res = self.call_cindex(py, "ancestors", args, kw)?;
205 // the algorithm should always provide the results in reverse ordering
205 // the algorithm should always provide the results in reverse ordering
206 assert_py_eq(py, "ancestors", &rust_res, &c_res)?;
206 assert_py_eq(py, "ancestors", &rust_res, &c_res)?;
207
207
208 Ok(rust_res)
208 Ok(rust_res)
209 }
209 }
210
210
211 /// return the heads of the common ancestors of the given revs
211 /// return the heads of the common ancestors of the given revs
212 def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
212 def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
213 let rust_res = self.inner_commonancestorsheads(py, args)?;
213 let rust_res = self.inner_commonancestorsheads(py, args)?;
214
214
215 let c_res = self.call_cindex(py, "commonancestorsheads", args, kw)?;
215 let c_res = self.call_cindex(py, "commonancestorsheads", args, kw)?;
216 // the algorithm should always provide the results in reverse ordering
216 // the algorithm should always provide the results in reverse ordering
217 assert_py_eq(py, "commonancestorsheads", &rust_res, &c_res)?;
217 assert_py_eq(py, "commonancestorsheads", &rust_res, &c_res)?;
218
218
219 Ok(rust_res)
219 Ok(rust_res)
220 }
220 }
221
221
222 /// Clear the index caches and inner py_class data.
222 /// Clear the index caches and inner py_class data.
223 /// It is Python's responsibility to call `update_nodemap_data` again.
223 /// It is Python's responsibility to call `update_nodemap_data` again.
224 def clearcaches(&self, *args, **kw) -> PyResult<PyObject> {
224 def clearcaches(&self, *args, **kw) -> PyResult<PyObject> {
225 self.nt(py).borrow_mut().take();
225 self.nt(py).borrow_mut().take();
226 self.docket(py).borrow_mut().take();
226 self.docket(py).borrow_mut().take();
227 self.nodemap_mmap(py).borrow_mut().take();
227 self.nodemap_mmap(py).borrow_mut().take();
228 self.index(py).borrow_mut().clear_caches();
228 self.index(py).borrow_mut().clear_caches();
229 self.call_cindex(py, "clearcaches", args, kw)
229 self.call_cindex(py, "clearcaches", args, kw)
230 }
230 }
231
231
232 /// return the raw binary string representing a revision
232 /// return the raw binary string representing a revision
233 def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
233 def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
234 let rindex = self.index(py).borrow();
234 let rindex = self.index(py).borrow();
235 let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
235 let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
236 let rust_bytes = rindex.check_revision(rev).and_then(
236 let rust_bytes = rindex.check_revision(rev).and_then(
237 |r| rindex.entry_binary(r))
237 |r| rindex.entry_binary(r))
238 .ok_or_else(|| rev_not_in_index(py, rev))?;
238 .ok_or_else(|| rev_not_in_index(py, rev))?;
239 let rust_res = PyBytes::new(py, rust_bytes).into_object();
239 let rust_res = PyBytes::new(py, rust_bytes).into_object();
240
240
241 let c_res = self.call_cindex(py, "entry_binary", args, kw)?;
241 let c_res = self.call_cindex(py, "entry_binary", args, kw)?;
242 assert_py_eq(py, "entry_binary", &rust_res, &c_res)?;
242 assert_py_eq(py, "entry_binary", &rust_res, &c_res)?;
243 Ok(rust_res)
243 Ok(rust_res)
244 }
244 }
245
245
246 /// return a binary packed version of the header
246 /// return a binary packed version of the header
247 def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
247 def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
248 let rindex = self.index(py).borrow();
248 let rindex = self.index(py).borrow();
249 let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
249 let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
250 let rust_res = PyBytes::new(py, &packed).into_object();
250 let rust_res = PyBytes::new(py, &packed).into_object();
251
251
252 let c_res = self.call_cindex(py, "pack_header", args, kw)?;
252 let c_res = self.call_cindex(py, "pack_header", args, kw)?;
253 assert_py_eq(py, "pack_header", &rust_res, &c_res)?;
253 assert_py_eq(py, "pack_header", &rust_res, &c_res)?;
254 Ok(rust_res)
254 Ok(rust_res)
255 }
255 }
256
256
257 /// compute phases
257 /// compute phases
258 def computephasesmapsets(&self, *args, **kw) -> PyResult<PyObject> {
258 def computephasesmapsets(&self, *args, **kw) -> PyResult<PyObject> {
259 let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
259 let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
260 let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
260 let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
261
261
262 let c_res = self.call_cindex(py, "computephasesmapsets", args, kw)?;
262 let c_res = self.call_cindex(py, "computephasesmapsets", args, kw)?;
263 assert_py_eq(py, "computephasesmapsets", &rust_res, &c_res)?;
263 assert_py_eq(py, "computephasesmapsets", &rust_res, &c_res)?;
264 Ok(rust_res)
264 Ok(rust_res)
265 }
265 }
266
266
267 /// reachableroots
267 /// reachableroots
268 def reachableroots2(&self, *args, **kw) -> PyResult<PyObject> {
268 def reachableroots2(&self, *args, **kw) -> PyResult<PyObject> {
269 let rust_res = self.inner_reachableroots2(
269 let rust_res = self.inner_reachableroots2(
270 py,
270 py,
271 UncheckedRevision(args.get_item(py, 0).extract(py)?),
271 UncheckedRevision(args.get_item(py, 0).extract(py)?),
272 args.get_item(py, 1),
272 args.get_item(py, 1),
273 args.get_item(py, 2),
273 args.get_item(py, 2),
274 args.get_item(py, 3).extract(py)?,
274 args.get_item(py, 3).extract(py)?,
275 )?;
275 )?;
276
276
277 let c_res = self.call_cindex(py, "reachableroots2", args, kw)?;
277 let c_res = self.call_cindex(py, "reachableroots2", args, kw)?;
278 // ordering of C result depends on how the computation went, and
278 // ordering of C result depends on how the computation went, and
279 // Rust result ordering is arbitrary. Hence we compare after
279 // Rust result ordering is arbitrary. Hence we compare after
280 // sorting the results (in Python to avoid reconverting everything
280 // sorting the results (in Python to avoid reconverting everything
281 // back to Rust structs).
281 // back to Rust structs).
282 assert_py_eq_normalized(py, "reachableroots2", &rust_res, &c_res,
282 assert_py_eq_normalized(py, "reachableroots2", &rust_res, &c_res,
283 |v| format!("sorted({})", v))?;
283 |v| format!("sorted({})", v))?;
284
284
285 Ok(rust_res)
285 Ok(rust_res)
286 }
286 }
287
287
288 /// get head revisions
288 /// get head revisions
289 def headrevs(&self, *args, **kw) -> PyResult<PyObject> {
289 def headrevs(&self, *args, **kw) -> PyResult<PyObject> {
290 let rust_res = self.inner_headrevs(py)?;
290 let rust_res = self.inner_headrevs(py)?;
291
291
292 let c_res = self.call_cindex(py, "headrevs", args, kw)?;
292 let c_res = self.call_cindex(py, "headrevs", args, kw)?;
293 assert_py_eq(py, "headrevs", &rust_res, &c_res)?;
293 assert_py_eq(py, "headrevs", &rust_res, &c_res)?;
294 Ok(rust_res)
294 Ok(rust_res)
295 }
295 }
296
296
297 /// get filtered head revisions
297 /// get filtered head revisions
298 def headrevsfiltered(&self, *args, **kw) -> PyResult<PyObject> {
298 def headrevsfiltered(&self, *args, **kw) -> PyResult<PyObject> {
299 let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
299 let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
300 let c_res = self.call_cindex(py, "headrevsfiltered", args, kw)?;
300 let c_res = self.call_cindex(py, "headrevsfiltered", args, kw)?;
301
301
302 assert_py_eq(py, "headrevsfiltered", &rust_res, &c_res)?;
302 assert_py_eq(py, "headrevsfiltered", &rust_res, &c_res)?;
303 Ok(rust_res)
303 Ok(rust_res)
304 }
304 }
305
305
306 /// True if the object is a snapshot
306 /// True if the object is a snapshot
307 def issnapshot(&self, *args, **kw) -> PyResult<bool> {
307 def issnapshot(&self, *args, **kw) -> PyResult<bool> {
308 let index = self.index(py).borrow();
308 let index = self.index(py).borrow();
309 let result = index
309 let result = index
310 .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
310 .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
311 .map_err(|e| {
311 .map_err(|e| {
312 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
312 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
313 })?;
313 })?;
314 let cresult = self.call_cindex(py, "issnapshot", args, kw)?;
314 let cresult = self.call_cindex(py, "issnapshot", args, kw)?;
315 assert_eq!(result, cresult.extract(py)?);
315 assert_eq!(result, cresult.extract(py)?);
316 Ok(result)
316 Ok(result)
317 }
317 }
318
318
319 /// Gather snapshot data in a cache dict
319 /// Gather snapshot data in a cache dict
320 def findsnapshots(&self, *args, **kw) -> PyResult<PyObject> {
320 def findsnapshots(&self, *args, **kw) -> PyResult<PyObject> {
321 let index = self.index(py).borrow();
321 let index = self.index(py).borrow();
322 let cache: PyDict = args.get_item(py, 0).extract(py)?;
322 let cache: PyDict = args.get_item(py, 0).extract(py)?;
323 // this methods operates by setting new values in the cache,
323 // this methods operates by setting new values in the cache,
324 // hence we will compare results by letting the C implementation
324 // hence we will compare results by letting the C implementation
325 // operate over a deepcopy of the cache, and finally compare both
325 // operate over a deepcopy of the cache, and finally compare both
326 // caches.
326 // caches.
327 let c_cache = PyDict::new(py);
327 let c_cache = PyDict::new(py);
328 for (k, v) in cache.items(py) {
328 for (k, v) in cache.items(py) {
329 c_cache.set_item(py, k, PySet::new(py, v)?)?;
329 c_cache.set_item(py, k, PySet::new(py, v)?)?;
330 }
330 }
331
331
332 let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
332 let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
333 let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
333 let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
334 let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
334 let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
335 index.find_snapshots(
335 index.find_snapshots(
336 start_rev,
336 start_rev,
337 end_rev,
337 end_rev,
338 &mut cache_wrapper,
338 &mut cache_wrapper,
339 ).map_err(|_| revlog_error(py))?;
339 ).map_err(|_| revlog_error(py))?;
340
340
341 let c_args = PyTuple::new(
341 let c_args = PyTuple::new(
342 py,
342 py,
343 &[
343 &[
344 c_cache.clone_ref(py).into_object(),
344 c_cache.clone_ref(py).into_object(),
345 args.get_item(py, 1),
345 args.get_item(py, 1),
346 args.get_item(py, 2)
346 args.get_item(py, 2)
347 ]
347 ]
348 );
348 );
349 self.call_cindex(py, "findsnapshots", &c_args, kw)?;
349 self.call_cindex(py, "findsnapshots", &c_args, kw)?;
350 assert_py_eq(py, "findsnapshots cache",
350 assert_py_eq(py, "findsnapshots cache",
351 &cache_wrapper.into_object(),
351 &cache_wrapper.into_object(),
352 &c_cache.into_object())?;
352 &c_cache.into_object())?;
353 Ok(py.None())
353 Ok(py.None())
354 }
354 }
355
355
356 /// determine revisions with deltas to reconstruct fulltext
356 /// determine revisions with deltas to reconstruct fulltext
357 def deltachain(&self, *args, **kw) -> PyResult<PyObject> {
357 def deltachain(&self, *args, **kw) -> PyResult<PyObject> {
358 let index = self.index(py).borrow();
358 let index = self.index(py).borrow();
359 let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
359 let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
360 let stop_rev =
360 let stop_rev =
361 args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
361 args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
362 let rev = index.check_revision(rev).ok_or_else(|| {
362 let rev = index.check_revision(rev).ok_or_else(|| {
363 nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
363 nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
364 })?;
364 })?;
365 let stop_rev = if let Some(stop_rev) = stop_rev {
365 let stop_rev = if let Some(stop_rev) = stop_rev {
366 let stop_rev = UncheckedRevision(stop_rev);
366 let stop_rev = UncheckedRevision(stop_rev);
367 Some(index.check_revision(stop_rev).ok_or_else(|| {
367 Some(index.check_revision(stop_rev).ok_or_else(|| {
368 nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
368 nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
369 })?)
369 })?)
370 } else {None};
370 } else {None};
371 let (chain, stopped) = index.delta_chain(rev, stop_rev).map_err(|e| {
371 let (chain, stopped) = index.delta_chain(rev, stop_rev).map_err(|e| {
372 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
372 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
373 })?;
373 })?;
374
374
375 let cresult = self.call_cindex(py, "deltachain", args, kw)?;
375 let cresult = self.call_cindex(py, "deltachain", args, kw)?;
376 let cchain: Vec<BaseRevision> =
376 let cchain: Vec<BaseRevision> =
377 cresult.get_item(py, 0)?.extract::<Vec<BaseRevision>>(py)?;
377 cresult.get_item(py, 0)?.extract::<Vec<BaseRevision>>(py)?;
378 let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
378 let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
379 assert_eq!(chain, cchain);
379 assert_eq!(chain, cchain);
380 assert_eq!(stopped, cresult.get_item(py, 1)?.extract(py)?);
380 assert_eq!(stopped, cresult.get_item(py, 1)?.extract(py)?);
381
381
382 Ok(
382 Ok(
383 PyTuple::new(
383 PyTuple::new(
384 py,
384 py,
385 &[
385 &[
386 chain.into_py_object(py).into_object(),
386 chain.into_py_object(py).into_object(),
387 stopped.into_py_object(py).into_object()
387 stopped.into_py_object(py).into_object()
388 ]
388 ]
389 ).into_object()
389 ).into_object()
390 )
390 )
391
391
392 }
392 }
393
393
394 /// slice planned chunk read to reach a density threshold
394 /// slice planned chunk read to reach a density threshold
395 def slicechunktodensity(&self, *args, **kw) -> PyResult<PyObject> {
395 def slicechunktodensity(&self, *args, **kw) -> PyResult<PyObject> {
396 let rust_res = self.inner_slicechunktodensity(
396 let rust_res = self.inner_slicechunktodensity(
397 py,
397 py,
398 args.get_item(py, 0),
398 args.get_item(py, 0),
399 args.get_item(py, 1).extract(py)?,
399 args.get_item(py, 1).extract(py)?,
400 args.get_item(py, 2).extract(py)?
400 args.get_item(py, 2).extract(py)?
401 )?;
401 )?;
402
402
403 let c_res = self.call_cindex(py, "slicechunktodensity", args, kw)?;
403 let c_res = self.call_cindex(py, "slicechunktodensity", args, kw)?;
404 assert_py_eq(py, "slicechunktodensity", &rust_res, &c_res)?;
404 assert_py_eq(py, "slicechunktodensity", &rust_res, &c_res)?;
405 Ok(rust_res)
405 Ok(rust_res)
406 }
406 }
407
407
408 /// stats for the index
409 def stats(&self, *args, **kw) -> PyResult<PyObject> {
410 self.call_cindex(py, "stats", args, kw)
411 }
412
413 // index_sequence_methods and index_mapping_methods.
408 // index_sequence_methods and index_mapping_methods.
414 //
409 //
415 // Since we call back through the high level Python API,
410 // Since we call back through the high level Python API,
416 // there's no point making a distinction between index_get
411 // there's no point making a distinction between index_get
417 // and index_getitem.
412 // and index_getitem.
418 // gracinet 2023: this above is no longer true for the pure Rust impl
413 // gracinet 2023: this above is no longer true for the pure Rust impl
419
414
420 def __len__(&self) -> PyResult<usize> {
415 def __len__(&self) -> PyResult<usize> {
421 self.len(py)
416 self.len(py)
422 }
417 }
423
418
424 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
419 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
425 let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
420 let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
426
421
427 // this conversion seems needless, but that's actually because
422 // this conversion seems needless, but that's actually because
428 // `index_getitem` does not handle conversion from PyLong,
423 // `index_getitem` does not handle conversion from PyLong,
429 // which expressions such as [e for e in index] internally use.
424 // which expressions such as [e for e in index] internally use.
430 // Note that we don't seem to have a direct way to call
425 // Note that we don't seem to have a direct way to call
431 // PySequence_GetItem (does the job), which would possibly be better
426 // PySequence_GetItem (does the job), which would possibly be better
432 // for performance
427 // for performance
433 // gracinet 2023: the above comment can be removed when we use
428 // gracinet 2023: the above comment can be removed when we use
434 // the pure Rust impl only. Note also that `key` can be a binary
429 // the pure Rust impl only. Note also that `key` can be a binary
435 // node id.
430 // node id.
436 let c_key = match key.extract::<BaseRevision>(py) {
431 let c_key = match key.extract::<BaseRevision>(py) {
437 Ok(rev) => rev.to_py_object(py).into_object(),
432 Ok(rev) => rev.to_py_object(py).into_object(),
438 Err(_) => key,
433 Err(_) => key,
439 };
434 };
440 let c_res = self.cindex(py).borrow().inner().get_item(py, c_key)?;
435 let c_res = self.cindex(py).borrow().inner().get_item(py, c_key)?;
441
436
442 assert_py_eq(py, "__getitem__", &rust_res, &c_res)?;
437 assert_py_eq(py, "__getitem__", &rust_res, &c_res)?;
443 Ok(rust_res)
438 Ok(rust_res)
444 }
439 }
445
440
446 def __contains__(&self, item: PyObject) -> PyResult<bool> {
441 def __contains__(&self, item: PyObject) -> PyResult<bool> {
447 // ObjectProtocol does not seem to provide contains(), so
442 // ObjectProtocol does not seem to provide contains(), so
448 // this is an equivalent implementation of the index_contains()
443 // this is an equivalent implementation of the index_contains()
449 // defined in revlog.c
444 // defined in revlog.c
450 let cindex = self.cindex(py).borrow();
445 let cindex = self.cindex(py).borrow();
451 match item.extract::<i32>(py) {
446 match item.extract::<i32>(py) {
452 Ok(rev) => {
447 Ok(rev) => {
453 Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
448 Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
454 }
449 }
455 Err(_) => {
450 Err(_) => {
456 let item_bytes: PyBytes = item.extract(py)?;
451 let item_bytes: PyBytes = item.extract(py)?;
457 let rust_res = self.has_node(py, item_bytes)?;
452 let rust_res = self.has_node(py, item_bytes)?;
458
453
459 let c_res = cindex.inner().call_method(
454 let c_res = cindex.inner().call_method(
460 py,
455 py,
461 "has_node",
456 "has_node",
462 PyTuple::new(py, &[item.clone_ref(py)]),
457 PyTuple::new(py, &[item.clone_ref(py)]),
463 None)?
458 None)?
464 .extract(py)?;
459 .extract(py)?;
465
460
466 assert_eq!(rust_res, c_res);
461 assert_eq!(rust_res, c_res);
467 Ok(rust_res)
462 Ok(rust_res)
468 }
463 }
469 }
464 }
470 }
465 }
471
466
472 def nodemap_data_all(&self) -> PyResult<PyBytes> {
467 def nodemap_data_all(&self) -> PyResult<PyBytes> {
473 self.inner_nodemap_data_all(py)
468 self.inner_nodemap_data_all(py)
474 }
469 }
475
470
476 def nodemap_data_incremental(&self) -> PyResult<PyObject> {
471 def nodemap_data_incremental(&self) -> PyResult<PyObject> {
477 self.inner_nodemap_data_incremental(py)
472 self.inner_nodemap_data_incremental(py)
478 }
473 }
479 def update_nodemap_data(
474 def update_nodemap_data(
480 &self,
475 &self,
481 docket: PyObject,
476 docket: PyObject,
482 nm_data: PyObject
477 nm_data: PyObject
483 ) -> PyResult<PyObject> {
478 ) -> PyResult<PyObject> {
484 self.inner_update_nodemap_data(py, docket, nm_data)
479 self.inner_update_nodemap_data(py, docket, nm_data)
485 }
480 }
486
481
487 @property
482 @property
488 def entry_size(&self) -> PyResult<PyInt> {
483 def entry_size(&self) -> PyResult<PyInt> {
489 let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
484 let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
490
485
491 let c_res = self.cindex(py).borrow().inner()
486 let c_res = self.cindex(py).borrow().inner()
492 .getattr(py, "entry_size")?;
487 .getattr(py, "entry_size")?;
493 assert_py_eq(py, "entry_size", rust_res.as_object(), &c_res)?;
488 assert_py_eq(py, "entry_size", rust_res.as_object(), &c_res)?;
494
489
495 Ok(rust_res)
490 Ok(rust_res)
496 }
491 }
497
492
498 @property
493 @property
499 def rust_ext_compat(&self) -> PyResult<PyInt> {
494 def rust_ext_compat(&self) -> PyResult<PyInt> {
500 // will be entirely removed when the Rust index yet useful to
495 // will be entirely removed when the Rust index yet useful to
501 // implement in Rust to detangle things when removing `self.cindex`
496 // implement in Rust to detangle things when removing `self.cindex`
502 let rust_res: PyInt = 1.to_py_object(py);
497 let rust_res: PyInt = 1.to_py_object(py);
503
498
504 let c_res = self.cindex(py).borrow().inner()
499 let c_res = self.cindex(py).borrow().inner()
505 .getattr(py, "rust_ext_compat")?;
500 .getattr(py, "rust_ext_compat")?;
506 assert_py_eq(py, "rust_ext_compat", rust_res.as_object(), &c_res)?;
501 assert_py_eq(py, "rust_ext_compat", rust_res.as_object(), &c_res)?;
507
502
508 Ok(rust_res)
503 Ok(rust_res)
509 }
504 }
510
505
511 });
506 });
512
507
513 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
508 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
514 /// buffer along with the Rust slice into said buffer. We need to keep the
509 /// buffer along with the Rust slice into said buffer. We need to keep the
515 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
510 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
516 /// is freed from Python's side.
511 /// is freed from Python's side.
517 ///
512 ///
518 /// # Safety
513 /// # Safety
519 ///
514 ///
520 /// The caller must make sure that the buffer is kept around for at least as
515 /// The caller must make sure that the buffer is kept around for at least as
521 /// long as the slice.
516 /// long as the slice.
522 #[deny(unsafe_op_in_unsafe_fn)]
517 #[deny(unsafe_op_in_unsafe_fn)]
523 unsafe fn mmap_keeparound(
518 unsafe fn mmap_keeparound(
524 py: Python,
519 py: Python,
525 data: PyObject,
520 data: PyObject,
526 ) -> PyResult<(
521 ) -> PyResult<(
527 PyBuffer,
522 PyBuffer,
528 Box<dyn std::ops::Deref<Target = [u8]> + Send + 'static>,
523 Box<dyn std::ops::Deref<Target = [u8]> + Send + 'static>,
529 )> {
524 )> {
530 let buf = PyBuffer::get(py, &data)?;
525 let buf = PyBuffer::get(py, &data)?;
531 let len = buf.item_count();
526 let len = buf.item_count();
532
527
533 // Build a slice from the mmap'ed buffer data
528 // Build a slice from the mmap'ed buffer data
534 let cbuf = buf.buf_ptr();
529 let cbuf = buf.buf_ptr();
535 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
530 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
536 && buf.is_c_contiguous()
531 && buf.is_c_contiguous()
537 && u8::is_compatible_format(buf.format())
532 && u8::is_compatible_format(buf.format())
538 {
533 {
539 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
534 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
540 } else {
535 } else {
541 return Err(PyErr::new::<ValueError, _>(
536 return Err(PyErr::new::<ValueError, _>(
542 py,
537 py,
543 "Nodemap data buffer has an invalid memory representation"
538 "Nodemap data buffer has an invalid memory representation"
544 .to_string(),
539 .to_string(),
545 ));
540 ));
546 };
541 };
547
542
548 Ok((buf, Box::new(bytes)))
543 Ok((buf, Box::new(bytes)))
549 }
544 }
550
545
551 fn py_tuple_to_revision_data_params(
546 fn py_tuple_to_revision_data_params(
552 py: Python,
547 py: Python,
553 tuple: PyTuple,
548 tuple: PyTuple,
554 ) -> PyResult<RevisionDataParams> {
549 ) -> PyResult<RevisionDataParams> {
555 if tuple.len(py) < 8 {
550 if tuple.len(py) < 8 {
556 // this is better than the panic promised by tup.get_item()
551 // this is better than the panic promised by tup.get_item()
557 return Err(PyErr::new::<IndexError, _>(
552 return Err(PyErr::new::<IndexError, _>(
558 py,
553 py,
559 "tuple index out of range",
554 "tuple index out of range",
560 ));
555 ));
561 }
556 }
562 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
557 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
563 let node_id = tuple
558 let node_id = tuple
564 .get_item(py, 7)
559 .get_item(py, 7)
565 .extract::<PyBytes>(py)?
560 .extract::<PyBytes>(py)?
566 .data(py)
561 .data(py)
567 .try_into()
562 .try_into()
568 .unwrap();
563 .unwrap();
569 let flags = (offset_or_flags & 0xFFFF) as u16;
564 let flags = (offset_or_flags & 0xFFFF) as u16;
570 let data_offset = offset_or_flags >> 16;
565 let data_offset = offset_or_flags >> 16;
571 Ok(RevisionDataParams {
566 Ok(RevisionDataParams {
572 flags,
567 flags,
573 data_offset,
568 data_offset,
574 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
569 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
575 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
570 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
576 data_delta_base: tuple.get_item(py, 3).extract(py)?,
571 data_delta_base: tuple.get_item(py, 3).extract(py)?,
577 link_rev: tuple.get_item(py, 4).extract(py)?,
572 link_rev: tuple.get_item(py, 4).extract(py)?,
578 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
573 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
579 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
574 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
580 node_id,
575 node_id,
581 ..Default::default()
576 ..Default::default()
582 })
577 })
583 }
578 }
584 fn revision_data_params_to_py_tuple(
579 fn revision_data_params_to_py_tuple(
585 py: Python,
580 py: Python,
586 params: RevisionDataParams,
581 params: RevisionDataParams,
587 ) -> PyTuple {
582 ) -> PyTuple {
588 PyTuple::new(
583 PyTuple::new(
589 py,
584 py,
590 &[
585 &[
591 params.data_offset.into_py_object(py).into_object(),
586 params.data_offset.into_py_object(py).into_object(),
592 params
587 params
593 .data_compressed_length
588 .data_compressed_length
594 .into_py_object(py)
589 .into_py_object(py)
595 .into_object(),
590 .into_object(),
596 params
591 params
597 .data_uncompressed_length
592 .data_uncompressed_length
598 .into_py_object(py)
593 .into_py_object(py)
599 .into_object(),
594 .into_object(),
600 params.data_delta_base.into_py_object(py).into_object(),
595 params.data_delta_base.into_py_object(py).into_object(),
601 params.link_rev.into_py_object(py).into_object(),
596 params.link_rev.into_py_object(py).into_object(),
602 params.parent_rev_1.into_py_object(py).into_object(),
597 params.parent_rev_1.into_py_object(py).into_object(),
603 params.parent_rev_2.into_py_object(py).into_object(),
598 params.parent_rev_2.into_py_object(py).into_object(),
604 PyBytes::new(py, &params.node_id)
599 PyBytes::new(py, &params.node_id)
605 .into_py_object(py)
600 .into_py_object(py)
606 .into_object(),
601 .into_object(),
607 params._sidedata_offset.into_py_object(py).into_object(),
602 params._sidedata_offset.into_py_object(py).into_object(),
608 params
603 params
609 ._sidedata_compressed_length
604 ._sidedata_compressed_length
610 .into_py_object(py)
605 .into_py_object(py)
611 .into_object(),
606 .into_object(),
612 params
607 params
613 .data_compression_mode
608 .data_compression_mode
614 .into_py_object(py)
609 .into_py_object(py)
615 .into_object(),
610 .into_object(),
616 params
611 params
617 ._sidedata_compression_mode
612 ._sidedata_compression_mode
618 .into_py_object(py)
613 .into_py_object(py)
619 .into_object(),
614 .into_object(),
620 params._rank.into_py_object(py).into_object(),
615 params._rank.into_py_object(py).into_object(),
621 ],
616 ],
622 )
617 )
623 }
618 }
624
619
625 struct PySnapshotsCache<'p> {
620 struct PySnapshotsCache<'p> {
626 py: Python<'p>,
621 py: Python<'p>,
627 dict: PyDict,
622 dict: PyDict,
628 }
623 }
629
624
630 impl<'p> PySnapshotsCache<'p> {
625 impl<'p> PySnapshotsCache<'p> {
631 fn into_object(self) -> PyObject {
626 fn into_object(self) -> PyObject {
632 self.dict.into_object()
627 self.dict.into_object()
633 }
628 }
634 }
629 }
635
630
636 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
631 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
637 fn insert_for(
632 fn insert_for(
638 &mut self,
633 &mut self,
639 rev: BaseRevision,
634 rev: BaseRevision,
640 value: BaseRevision,
635 value: BaseRevision,
641 ) -> Result<(), RevlogError> {
636 ) -> Result<(), RevlogError> {
642 let pyvalue = value.into_py_object(self.py).into_object();
637 let pyvalue = value.into_py_object(self.py).into_object();
643 match self.dict.get_item(self.py, rev) {
638 match self.dict.get_item(self.py, rev) {
644 Some(obj) => obj
639 Some(obj) => obj
645 .extract::<PySet>(self.py)
640 .extract::<PySet>(self.py)
646 .and_then(|set| set.add(self.py, pyvalue)),
641 .and_then(|set| set.add(self.py, pyvalue)),
647 None => PySet::new(self.py, vec![pyvalue])
642 None => PySet::new(self.py, vec![pyvalue])
648 .and_then(|set| self.dict.set_item(self.py, rev, set)),
643 .and_then(|set| self.dict.set_item(self.py, rev, set)),
649 }
644 }
650 .map_err(|_| {
645 .map_err(|_| {
651 RevlogError::Other(HgError::unsupported(
646 RevlogError::Other(HgError::unsupported(
652 "Error in Python caches handling",
647 "Error in Python caches handling",
653 ))
648 ))
654 })
649 })
655 }
650 }
656 }
651 }
657
652
658 impl MixedIndex {
653 impl MixedIndex {
659 fn new(
654 fn new(
660 py: Python,
655 py: Python,
661 cindex: PyObject,
656 cindex: PyObject,
662 data: PyObject,
657 data: PyObject,
663 header: u32,
658 header: u32,
664 ) -> PyResult<MixedIndex> {
659 ) -> PyResult<MixedIndex> {
665 // Safety: we keep the buffer around inside the class as `index_mmap`
660 // Safety: we keep the buffer around inside the class as `index_mmap`
666 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
661 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
667
662
668 Self::create_instance(
663 Self::create_instance(
669 py,
664 py,
670 RefCell::new(cindex::Index::new(py, cindex)?),
665 RefCell::new(cindex::Index::new(py, cindex)?),
671 RefCell::new(
666 RefCell::new(
672 hg::index::Index::new(
667 hg::index::Index::new(
673 bytes,
668 bytes,
674 IndexHeader::parse(&header.to_be_bytes())
669 IndexHeader::parse(&header.to_be_bytes())
675 .expect("default header is broken")
670 .expect("default header is broken")
676 .unwrap(),
671 .unwrap(),
677 )
672 )
678 .map_err(|e| {
673 .map_err(|e| {
679 revlog_error_with_msg(py, e.to_string().as_bytes())
674 revlog_error_with_msg(py, e.to_string().as_bytes())
680 })?,
675 })?,
681 ),
676 ),
682 RefCell::new(None),
677 RefCell::new(None),
683 RefCell::new(None),
678 RefCell::new(None),
684 RefCell::new(None),
679 RefCell::new(None),
685 RefCell::new(Some(buf)),
680 RefCell::new(Some(buf)),
686 )
681 )
687 }
682 }
688
683
689 fn len(&self, py: Python) -> PyResult<usize> {
684 fn len(&self, py: Python) -> PyResult<usize> {
690 let rust_index_len = self.index(py).borrow().len();
685 let rust_index_len = self.index(py).borrow().len();
691 let cindex_len = self.cindex(py).borrow().inner().len(py)?;
686 let cindex_len = self.cindex(py).borrow().inner().len(py)?;
692 assert_eq!(rust_index_len, cindex_len);
687 assert_eq!(rust_index_len, cindex_len);
693 Ok(rust_index_len)
688 Ok(rust_index_len)
694 }
689 }
695
690
696 /// This is scaffolding at this point, but it could also become
691 /// This is scaffolding at this point, but it could also become
697 /// a way to start a persistent nodemap or perform a
692 /// a way to start a persistent nodemap or perform a
698 /// vacuum / repack operation
693 /// vacuum / repack operation
699 fn fill_nodemap(
694 fn fill_nodemap(
700 &self,
695 &self,
701 py: Python,
696 py: Python,
702 nt: &mut NodeTree,
697 nt: &mut NodeTree,
703 ) -> PyResult<PyObject> {
698 ) -> PyResult<PyObject> {
704 let index = self.index(py).borrow();
699 let index = self.index(py).borrow();
705 for r in 0..self.len(py)? {
700 for r in 0..self.len(py)? {
706 let rev = Revision(r as BaseRevision);
701 let rev = Revision(r as BaseRevision);
707 // in this case node() won't ever return None
702 // in this case node() won't ever return None
708 nt.insert(&*index, index.node(rev).unwrap(), rev)
703 nt.insert(&*index, index.node(rev).unwrap(), rev)
709 .map_err(|e| nodemap_error(py, e))?
704 .map_err(|e| nodemap_error(py, e))?
710 }
705 }
711 Ok(py.None())
706 Ok(py.None())
712 }
707 }
713
708
714 fn get_nodetree<'a>(
709 fn get_nodetree<'a>(
715 &'a self,
710 &'a self,
716 py: Python<'a>,
711 py: Python<'a>,
717 ) -> PyResult<&'a RefCell<Option<NodeTree>>> {
712 ) -> PyResult<&'a RefCell<Option<NodeTree>>> {
718 if self.nt(py).borrow().is_none() {
713 if self.nt(py).borrow().is_none() {
719 let readonly = Box::<Vec<_>>::default();
714 let readonly = Box::<Vec<_>>::default();
720 let mut nt = NodeTree::load_bytes(readonly, 0);
715 let mut nt = NodeTree::load_bytes(readonly, 0);
721 self.fill_nodemap(py, &mut nt)?;
716 self.fill_nodemap(py, &mut nt)?;
722 self.nt(py).borrow_mut().replace(nt);
717 self.nt(py).borrow_mut().replace(nt);
723 }
718 }
724 Ok(self.nt(py))
719 Ok(self.nt(py))
725 }
720 }
726
721
727 /// forward a method call to the underlying C index
722 /// forward a method call to the underlying C index
728 fn call_cindex(
723 fn call_cindex(
729 &self,
724 &self,
730 py: Python,
725 py: Python,
731 name: &str,
726 name: &str,
732 args: &PyTuple,
727 args: &PyTuple,
733 kwargs: Option<&PyDict>,
728 kwargs: Option<&PyDict>,
734 ) -> PyResult<PyObject> {
729 ) -> PyResult<PyObject> {
735 self.cindex(py)
730 self.cindex(py)
736 .borrow()
731 .borrow()
737 .inner()
732 .inner()
738 .call_method(py, name, args, kwargs)
733 .call_method(py, name, args, kwargs)
739 }
734 }
740
735
741 pub fn clone_cindex(&self, py: Python) -> cindex::Index {
736 pub fn clone_cindex(&self, py: Python) -> cindex::Index {
742 self.cindex(py).borrow().clone_ref(py)
737 self.cindex(py).borrow().clone_ref(py)
743 }
738 }
744
739
745 /// Returns the full nodemap bytes to be written as-is to disk
740 /// Returns the full nodemap bytes to be written as-is to disk
746 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
741 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
747 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
742 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
748 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
743 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
749
744
750 // If there's anything readonly, we need to build the data again from
745 // If there's anything readonly, we need to build the data again from
751 // scratch
746 // scratch
752 let bytes = if readonly.len() > 0 {
747 let bytes = if readonly.len() > 0 {
753 let mut nt = NodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
748 let mut nt = NodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
754 self.fill_nodemap(py, &mut nt)?;
749 self.fill_nodemap(py, &mut nt)?;
755
750
756 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
751 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
757 assert_eq!(readonly.len(), 0);
752 assert_eq!(readonly.len(), 0);
758
753
759 bytes
754 bytes
760 } else {
755 } else {
761 bytes
756 bytes
762 };
757 };
763
758
764 let bytes = PyBytes::new(py, &bytes);
759 let bytes = PyBytes::new(py, &bytes);
765 Ok(bytes)
760 Ok(bytes)
766 }
761 }
767
762
768 /// Returns the last saved docket along with the size of any changed data
763 /// Returns the last saved docket along with the size of any changed data
769 /// (in number of blocks), and said data as bytes.
764 /// (in number of blocks), and said data as bytes.
770 fn inner_nodemap_data_incremental(
765 fn inner_nodemap_data_incremental(
771 &self,
766 &self,
772 py: Python,
767 py: Python,
773 ) -> PyResult<PyObject> {
768 ) -> PyResult<PyObject> {
774 let docket = self.docket(py).borrow();
769 let docket = self.docket(py).borrow();
775 let docket = match docket.as_ref() {
770 let docket = match docket.as_ref() {
776 Some(d) => d,
771 Some(d) => d,
777 None => return Ok(py.None()),
772 None => return Ok(py.None()),
778 };
773 };
779
774
780 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
775 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
781 let masked_blocks = node_tree.masked_readonly_blocks();
776 let masked_blocks = node_tree.masked_readonly_blocks();
782 let (_, data) = node_tree.into_readonly_and_added_bytes();
777 let (_, data) = node_tree.into_readonly_and_added_bytes();
783 let changed = masked_blocks * std::mem::size_of::<Block>();
778 let changed = masked_blocks * std::mem::size_of::<Block>();
784
779
785 Ok((docket, changed, PyBytes::new(py, &data))
780 Ok((docket, changed, PyBytes::new(py, &data))
786 .to_py_object(py)
781 .to_py_object(py)
787 .into_object())
782 .into_object())
788 }
783 }
789
784
790 /// Update the nodemap from the new (mmaped) data.
785 /// Update the nodemap from the new (mmaped) data.
791 /// The docket is kept as a reference for later incremental calls.
786 /// The docket is kept as a reference for later incremental calls.
792 fn inner_update_nodemap_data(
787 fn inner_update_nodemap_data(
793 &self,
788 &self,
794 py: Python,
789 py: Python,
795 docket: PyObject,
790 docket: PyObject,
796 nm_data: PyObject,
791 nm_data: PyObject,
797 ) -> PyResult<PyObject> {
792 ) -> PyResult<PyObject> {
798 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
793 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
799 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
794 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
800 let len = buf.item_count();
795 let len = buf.item_count();
801 self.nodemap_mmap(py).borrow_mut().replace(buf);
796 self.nodemap_mmap(py).borrow_mut().replace(buf);
802
797
803 let mut nt = NodeTree::load_bytes(bytes, len);
798 let mut nt = NodeTree::load_bytes(bytes, len);
804
799
805 let data_tip = docket
800 let data_tip = docket
806 .getattr(py, "tip_rev")?
801 .getattr(py, "tip_rev")?
807 .extract::<BaseRevision>(py)?
802 .extract::<BaseRevision>(py)?
808 .into();
803 .into();
809 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
804 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
810 let idx = self.index(py).borrow();
805 let idx = self.index(py).borrow();
811 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
806 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
812 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
807 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
813 })?;
808 })?;
814 let current_tip = idx.len();
809 let current_tip = idx.len();
815
810
816 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
811 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
817 let rev = Revision(r);
812 let rev = Revision(r);
818 // in this case node() won't ever return None
813 // in this case node() won't ever return None
819 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
814 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
820 .map_err(|e| nodemap_error(py, e))?
815 .map_err(|e| nodemap_error(py, e))?
821 }
816 }
822
817
823 *self.nt(py).borrow_mut() = Some(nt);
818 *self.nt(py).borrow_mut() = Some(nt);
824
819
825 Ok(py.None())
820 Ok(py.None())
826 }
821 }
827
822
828 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
823 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
829 let idx = self.index(py).borrow();
824 let idx = self.index(py).borrow();
830 Ok(match key.extract::<BaseRevision>(py) {
825 Ok(match key.extract::<BaseRevision>(py) {
831 Ok(key_as_int) => {
826 Ok(key_as_int) => {
832 let entry_params = if key_as_int == NULL_REVISION.0 {
827 let entry_params = if key_as_int == NULL_REVISION.0 {
833 RevisionDataParams::default()
828 RevisionDataParams::default()
834 } else {
829 } else {
835 let rev = UncheckedRevision(key_as_int);
830 let rev = UncheckedRevision(key_as_int);
836 match idx.entry_as_params(rev) {
831 match idx.entry_as_params(rev) {
837 Some(e) => e,
832 Some(e) => e,
838 None => {
833 None => {
839 return Err(PyErr::new::<IndexError, _>(
834 return Err(PyErr::new::<IndexError, _>(
840 py,
835 py,
841 "revlog index out of range",
836 "revlog index out of range",
842 ));
837 ));
843 }
838 }
844 }
839 }
845 };
840 };
846 revision_data_params_to_py_tuple(py, entry_params)
841 revision_data_params_to_py_tuple(py, entry_params)
847 .into_object()
842 .into_object()
848 }
843 }
849 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
844 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
850 || py.None(),
845 || py.None(),
851 |py_rev| py_rev.into_py_object(py).into_object(),
846 |py_rev| py_rev.into_py_object(py).into_object(),
852 ),
847 ),
853 })
848 })
854 }
849 }
855
850
856 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
851 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
857 let index = &mut *self.index(py).borrow_mut();
852 let index = &mut *self.index(py).borrow_mut();
858 let as_vec: Vec<PyObject> = index
853 let as_vec: Vec<PyObject> = index
859 .head_revs()
854 .head_revs()
860 .map_err(|e| graph_error(py, e))?
855 .map_err(|e| graph_error(py, e))?
861 .iter()
856 .iter()
862 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
857 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
863 .collect();
858 .collect();
864 Ok(PyList::new(py, &as_vec).into_object())
859 Ok(PyList::new(py, &as_vec).into_object())
865 }
860 }
866
861
867 fn inner_headrevsfiltered(
862 fn inner_headrevsfiltered(
868 &self,
863 &self,
869 py: Python,
864 py: Python,
870 filtered_revs: &PyObject,
865 filtered_revs: &PyObject,
871 ) -> PyResult<PyObject> {
866 ) -> PyResult<PyObject> {
872 let index = &mut *self.index(py).borrow_mut();
867 let index = &mut *self.index(py).borrow_mut();
873 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
868 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
874
869
875 let as_vec: Vec<PyObject> = index
870 let as_vec: Vec<PyObject> = index
876 .head_revs_filtered(&filtered_revs)
871 .head_revs_filtered(&filtered_revs)
877 .map_err(|e| graph_error(py, e))?
872 .map_err(|e| graph_error(py, e))?
878 .iter()
873 .iter()
879 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
874 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
880 .collect();
875 .collect();
881 Ok(PyList::new(py, &as_vec).into_object())
876 Ok(PyList::new(py, &as_vec).into_object())
882 }
877 }
883
878
884 fn inner_ancestors(
879 fn inner_ancestors(
885 &self,
880 &self,
886 py: Python,
881 py: Python,
887 py_revs: &PyTuple,
882 py_revs: &PyTuple,
888 ) -> PyResult<PyObject> {
883 ) -> PyResult<PyObject> {
889 let index = &mut *self.index(py).borrow_mut();
884 let index = &mut *self.index(py).borrow_mut();
890 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
885 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
891 let as_vec: Vec<_> = index
886 let as_vec: Vec<_> = index
892 .ancestors(&revs)
887 .ancestors(&revs)
893 .map_err(|e| graph_error(py, e))?
888 .map_err(|e| graph_error(py, e))?
894 .iter()
889 .iter()
895 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
890 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
896 .collect();
891 .collect();
897 Ok(PyList::new(py, &as_vec).into_object())
892 Ok(PyList::new(py, &as_vec).into_object())
898 }
893 }
899
894
900 fn inner_commonancestorsheads(
895 fn inner_commonancestorsheads(
901 &self,
896 &self,
902 py: Python,
897 py: Python,
903 py_revs: &PyTuple,
898 py_revs: &PyTuple,
904 ) -> PyResult<PyObject> {
899 ) -> PyResult<PyObject> {
905 let index = &mut *self.index(py).borrow_mut();
900 let index = &mut *self.index(py).borrow_mut();
906 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
901 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
907 let as_vec: Vec<_> = index
902 let as_vec: Vec<_> = index
908 .common_ancestor_heads(&revs)
903 .common_ancestor_heads(&revs)
909 .map_err(|e| graph_error(py, e))?
904 .map_err(|e| graph_error(py, e))?
910 .iter()
905 .iter()
911 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
906 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
912 .collect();
907 .collect();
913 Ok(PyList::new(py, &as_vec).into_object())
908 Ok(PyList::new(py, &as_vec).into_object())
914 }
909 }
915
910
916 fn inner_computephasesmapsets(
911 fn inner_computephasesmapsets(
917 &self,
912 &self,
918 py: Python,
913 py: Python,
919 py_roots: PyDict,
914 py_roots: PyDict,
920 ) -> PyResult<PyObject> {
915 ) -> PyResult<PyObject> {
921 let index = &*self.index(py).borrow();
916 let index = &*self.index(py).borrow();
922 let opt = self.get_nodetree(py)?.borrow();
917 let opt = self.get_nodetree(py)?.borrow();
923 let nt = opt.as_ref().unwrap();
918 let nt = opt.as_ref().unwrap();
924 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
919 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
925 .items_list(py)
920 .items_list(py)
926 .iter(py)
921 .iter(py)
927 .map(|r| {
922 .map(|r| {
928 let phase = r.get_item(py, 0)?;
923 let phase = r.get_item(py, 0)?;
929 let nodes = r.get_item(py, 1)?;
924 let nodes = r.get_item(py, 1)?;
930 // Transform the nodes from Python to revs here since we
925 // Transform the nodes from Python to revs here since we
931 // have access to the nodemap
926 // have access to the nodemap
932 let revs: Result<_, _> = nodes
927 let revs: Result<_, _> = nodes
933 .iter(py)?
928 .iter(py)?
934 .map(|node| match node?.extract::<PyBytes>(py) {
929 .map(|node| match node?.extract::<PyBytes>(py) {
935 Ok(py_bytes) => {
930 Ok(py_bytes) => {
936 let node = node_from_py_bytes(py, &py_bytes)?;
931 let node = node_from_py_bytes(py, &py_bytes)?;
937 nt.find_bin(index, node.into())
932 nt.find_bin(index, node.into())
938 .map_err(|e| nodemap_error(py, e))?
933 .map_err(|e| nodemap_error(py, e))?
939 .ok_or_else(|| revlog_error(py))
934 .ok_or_else(|| revlog_error(py))
940 }
935 }
941 Err(e) => Err(e),
936 Err(e) => Err(e),
942 })
937 })
943 .collect();
938 .collect();
944 let phase = Phase::try_from(phase.extract::<usize>(py)?)
939 let phase = Phase::try_from(phase.extract::<usize>(py)?)
945 .map_err(|_| revlog_error(py));
940 .map_err(|_| revlog_error(py));
946 Ok((phase?, revs?))
941 Ok((phase?, revs?))
947 })
942 })
948 .collect();
943 .collect();
949 let (len, phase_maps) = index
944 let (len, phase_maps) = index
950 .compute_phases_map_sets(roots?)
945 .compute_phases_map_sets(roots?)
951 .map_err(|e| graph_error(py, e))?;
946 .map_err(|e| graph_error(py, e))?;
952
947
953 // Ugly hack, but temporary
948 // Ugly hack, but temporary
954 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
949 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
955 let py_phase_maps = PyDict::new(py);
950 let py_phase_maps = PyDict::new(py);
956 for (idx, roots) in phase_maps.iter().enumerate() {
951 for (idx, roots) in phase_maps.iter().enumerate() {
957 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
952 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
958 // OPTIM too bad we have to collect here. At least, we could
953 // OPTIM too bad we have to collect here. At least, we could
959 // reuse the same Vec and allocate it with capacity at
954 // reuse the same Vec and allocate it with capacity at
960 // max(len(phase_maps)
955 // max(len(phase_maps)
961 let roots_vec: Vec<PyInt> = roots
956 let roots_vec: Vec<PyInt> = roots
962 .iter()
957 .iter()
963 .map(|r| PyRevision::from(*r).into_py_object(py))
958 .map(|r| PyRevision::from(*r).into_py_object(py))
964 .collect();
959 .collect();
965 py_phase_maps.set_item(
960 py_phase_maps.set_item(
966 py,
961 py,
967 phase_num,
962 phase_num,
968 PySet::new(py, roots_vec)?,
963 PySet::new(py, roots_vec)?,
969 )?;
964 )?;
970 }
965 }
971 Ok(PyTuple::new(
966 Ok(PyTuple::new(
972 py,
967 py,
973 &[
968 &[
974 len.into_py_object(py).into_object(),
969 len.into_py_object(py).into_object(),
975 py_phase_maps.into_object(),
970 py_phase_maps.into_object(),
976 ],
971 ],
977 )
972 )
978 .into_object())
973 .into_object())
979 }
974 }
980
975
981 fn inner_slicechunktodensity(
976 fn inner_slicechunktodensity(
982 &self,
977 &self,
983 py: Python,
978 py: Python,
984 revs: PyObject,
979 revs: PyObject,
985 target_density: f64,
980 target_density: f64,
986 min_gap_size: usize,
981 min_gap_size: usize,
987 ) -> PyResult<PyObject> {
982 ) -> PyResult<PyObject> {
988 let index = &mut *self.index(py).borrow_mut();
983 let index = &mut *self.index(py).borrow_mut();
989 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
984 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
990 let as_nested_vec =
985 let as_nested_vec =
991 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
986 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
992 let mut res = Vec::with_capacity(as_nested_vec.len());
987 let mut res = Vec::with_capacity(as_nested_vec.len());
993 let mut py_chunk = Vec::new();
988 let mut py_chunk = Vec::new();
994 for chunk in as_nested_vec {
989 for chunk in as_nested_vec {
995 py_chunk.clear();
990 py_chunk.clear();
996 py_chunk.reserve_exact(chunk.len());
991 py_chunk.reserve_exact(chunk.len());
997 for rev in chunk {
992 for rev in chunk {
998 py_chunk.push(
993 py_chunk.push(
999 PyRevision::from(rev).into_py_object(py).into_object(),
994 PyRevision::from(rev).into_py_object(py).into_object(),
1000 );
995 );
1001 }
996 }
1002 res.push(PyList::new(py, &py_chunk).into_object());
997 res.push(PyList::new(py, &py_chunk).into_object());
1003 }
998 }
1004 // This is just to do the same as C, not sure why it does this
999 // This is just to do the same as C, not sure why it does this
1005 if res.len() == 1 {
1000 if res.len() == 1 {
1006 Ok(PyTuple::new(py, &res).into_object())
1001 Ok(PyTuple::new(py, &res).into_object())
1007 } else {
1002 } else {
1008 Ok(PyList::new(py, &res).into_object())
1003 Ok(PyList::new(py, &res).into_object())
1009 }
1004 }
1010 }
1005 }
1011
1006
1012 fn inner_reachableroots2(
1007 fn inner_reachableroots2(
1013 &self,
1008 &self,
1014 py: Python,
1009 py: Python,
1015 min_root: UncheckedRevision,
1010 min_root: UncheckedRevision,
1016 heads: PyObject,
1011 heads: PyObject,
1017 roots: PyObject,
1012 roots: PyObject,
1018 include_path: bool,
1013 include_path: bool,
1019 ) -> PyResult<PyObject> {
1014 ) -> PyResult<PyObject> {
1020 let index = &*self.index(py).borrow();
1015 let index = &*self.index(py).borrow();
1021 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
1016 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
1022 PyErr::new::<IndexError, _>(py, "head out of range")
1017 PyErr::new::<IndexError, _>(py, "head out of range")
1023 })?;
1018 })?;
1024 let roots: Result<_, _> = roots
1019 let roots: Result<_, _> = roots
1025 .iter(py)?
1020 .iter(py)?
1026 .map(|r| {
1021 .map(|r| {
1027 r.and_then(|o| match o.extract::<PyRevision>(py) {
1022 r.and_then(|o| match o.extract::<PyRevision>(py) {
1028 Ok(r) => Ok(UncheckedRevision(r.0)),
1023 Ok(r) => Ok(UncheckedRevision(r.0)),
1029 Err(e) => Err(e),
1024 Err(e) => Err(e),
1030 })
1025 })
1031 })
1026 })
1032 .collect();
1027 .collect();
1033 let as_set = index
1028 let as_set = index
1034 .reachable_roots(min_root, heads, roots?, include_path)
1029 .reachable_roots(min_root, heads, roots?, include_path)
1035 .map_err(|e| graph_error(py, e))?;
1030 .map_err(|e| graph_error(py, e))?;
1036 let as_vec: Vec<PyObject> = as_set
1031 let as_vec: Vec<PyObject> = as_set
1037 .iter()
1032 .iter()
1038 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
1033 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
1039 .collect();
1034 .collect();
1040 Ok(PyList::new(py, &as_vec).into_object())
1035 Ok(PyList::new(py, &as_vec).into_object())
1041 }
1036 }
1042 }
1037 }
1043
1038
1044 fn revlog_error(py: Python) -> PyErr {
1039 fn revlog_error(py: Python) -> PyErr {
1045 match py
1040 match py
1046 .import("mercurial.error")
1041 .import("mercurial.error")
1047 .and_then(|m| m.get(py, "RevlogError"))
1042 .and_then(|m| m.get(py, "RevlogError"))
1048 {
1043 {
1049 Err(e) => e,
1044 Err(e) => e,
1050 Ok(cls) => PyErr::from_instance(
1045 Ok(cls) => PyErr::from_instance(
1051 py,
1046 py,
1052 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1047 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1053 ),
1048 ),
1054 }
1049 }
1055 }
1050 }
1056
1051
1057 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1052 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1058 match py
1053 match py
1059 .import("mercurial.error")
1054 .import("mercurial.error")
1060 .and_then(|m| m.get(py, "RevlogError"))
1055 .and_then(|m| m.get(py, "RevlogError"))
1061 {
1056 {
1062 Err(e) => e,
1057 Err(e) => e,
1063 Ok(cls) => PyErr::from_instance(
1058 Ok(cls) => PyErr::from_instance(
1064 py,
1059 py,
1065 cls.call(py, (PyBytes::new(py, msg),), None)
1060 cls.call(py, (PyBytes::new(py, msg),), None)
1066 .ok()
1061 .ok()
1067 .into_py_object(py),
1062 .into_py_object(py),
1068 ),
1063 ),
1069 }
1064 }
1070 }
1065 }
1071
1066
1072 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
1067 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
1073 // ParentOutOfRange is currently the only alternative
1068 // ParentOutOfRange is currently the only alternative
1074 // in `hg::GraphError`. The C index always raises this simple ValueError.
1069 // in `hg::GraphError`. The C index always raises this simple ValueError.
1075 PyErr::new::<ValueError, _>(py, "parent out of range")
1070 PyErr::new::<ValueError, _>(py, "parent out of range")
1076 }
1071 }
1077
1072
1078 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1073 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1079 PyErr::new::<ValueError, _>(
1074 PyErr::new::<ValueError, _>(
1080 py,
1075 py,
1081 format!(
1076 format!(
1082 "Inconsistency: Revision {} found in nodemap \
1077 "Inconsistency: Revision {} found in nodemap \
1083 is not in revlog index",
1078 is not in revlog index",
1084 rev
1079 rev
1085 ),
1080 ),
1086 )
1081 )
1087 }
1082 }
1088
1083
1089 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1084 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1090 PyErr::new::<ValueError, _>(
1085 PyErr::new::<ValueError, _>(
1091 py,
1086 py,
1092 format!("revlog index out of range: {}", rev),
1087 format!("revlog index out of range: {}", rev),
1093 )
1088 )
1094 }
1089 }
1095
1090
1096 /// Standard treatment of NodeMapError
1091 /// Standard treatment of NodeMapError
1097 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1092 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1098 match err {
1093 match err {
1099 NodeMapError::MultipleResults => revlog_error(py),
1094 NodeMapError::MultipleResults => revlog_error(py),
1100 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1095 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1101 }
1096 }
1102 }
1097 }
1103
1098
1104 /// assert two Python objects to be equal from a Python point of view
1099 /// assert two Python objects to be equal from a Python point of view
1105 ///
1100 ///
1106 /// `method` is a label for the assertion error message, intended to be the
1101 /// `method` is a label for the assertion error message, intended to be the
1107 /// name of the caller.
1102 /// name of the caller.
1108 /// `normalizer` is a function that takes a Python variable name and returns
1103 /// `normalizer` is a function that takes a Python variable name and returns
1109 /// an expression that the conparison will actually use.
1104 /// an expression that the conparison will actually use.
1110 /// Foe example: `|v| format!("sorted({})", v)`
1105 /// Foe example: `|v| format!("sorted({})", v)`
1111 fn assert_py_eq_normalized(
1106 fn assert_py_eq_normalized(
1112 py: Python,
1107 py: Python,
1113 method: &str,
1108 method: &str,
1114 rust: &PyObject,
1109 rust: &PyObject,
1115 c: &PyObject,
1110 c: &PyObject,
1116 normalizer: impl FnOnce(&str) -> String + Copy,
1111 normalizer: impl FnOnce(&str) -> String + Copy,
1117 ) -> PyResult<()> {
1112 ) -> PyResult<()> {
1118 let locals = PyDict::new(py);
1113 let locals = PyDict::new(py);
1119 locals.set_item(py, "rust".into_py_object(py).into_object(), rust)?;
1114 locals.set_item(py, "rust".into_py_object(py).into_object(), rust)?;
1120 locals.set_item(py, "c".into_py_object(py).into_object(), c)?;
1115 locals.set_item(py, "c".into_py_object(py).into_object(), c)?;
1121 // let lhs = format!(normalizer_fmt, "rust");
1116 // let lhs = format!(normalizer_fmt, "rust");
1122 // let rhs = format!(normalizer_fmt, "c");
1117 // let rhs = format!(normalizer_fmt, "c");
1123 let is_eq: PyBool = py
1118 let is_eq: PyBool = py
1124 .eval(
1119 .eval(
1125 &format!("{} == {}", &normalizer("rust"), &normalizer("c")),
1120 &format!("{} == {}", &normalizer("rust"), &normalizer("c")),
1126 None,
1121 None,
1127 Some(&locals),
1122 Some(&locals),
1128 )?
1123 )?
1129 .extract(py)?;
1124 .extract(py)?;
1130 assert!(
1125 assert!(
1131 is_eq.is_true(),
1126 is_eq.is_true(),
1132 "{} results differ. Rust: {:?} C: {:?} (before any normalization)",
1127 "{} results differ. Rust: {:?} C: {:?} (before any normalization)",
1133 method,
1128 method,
1134 rust,
1129 rust,
1135 c
1130 c
1136 );
1131 );
1137 Ok(())
1132 Ok(())
1138 }
1133 }
1139
1134
1140 fn assert_py_eq(
1135 fn assert_py_eq(
1141 py: Python,
1136 py: Python,
1142 method: &str,
1137 method: &str,
1143 rust: &PyObject,
1138 rust: &PyObject,
1144 c: &PyObject,
1139 c: &PyObject,
1145 ) -> PyResult<()> {
1140 ) -> PyResult<()> {
1146 assert_py_eq_normalized(py, method, rust, c, |v| v.to_owned())
1141 assert_py_eq_normalized(py, method, rust, c, |v| v.to_owned())
1147 }
1142 }
1148
1143
1149 /// Create the module, with __package__ given from parent
1144 /// Create the module, with __package__ given from parent
1150 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1145 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1151 let dotted_name = &format!("{}.revlog", package);
1146 let dotted_name = &format!("{}.revlog", package);
1152 let m = PyModule::new(py, dotted_name)?;
1147 let m = PyModule::new(py, dotted_name)?;
1153 m.add(py, "__package__", package)?;
1148 m.add(py, "__package__", package)?;
1154 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1149 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1155
1150
1156 m.add_class::<MixedIndex>(py)?;
1151 m.add_class::<MixedIndex>(py)?;
1157
1152
1158 let sys = PyModule::import(py, "sys")?;
1153 let sys = PyModule::import(py, "sys")?;
1159 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1154 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1160 sys_modules.set_item(py, dotted_name, &m)?;
1155 sys_modules.set_item(py, dotted_name, &m)?;
1161
1156
1162 Ok(m)
1157 Ok(m)
1163 }
1158 }
@@ -1,818 +1,818 b''
1 $ cat << EOF >> $HGRCPATH
1 $ cat << EOF >> $HGRCPATH
2 > [ui]
2 > [ui]
3 > interactive=yes
3 > interactive=yes
4 > EOF
4 > EOF
5
5
6 $ hg init debugrevlog
6 $ hg init debugrevlog
7 $ cd debugrevlog
7 $ cd debugrevlog
8 $ echo a > a
8 $ echo a > a
9 $ hg ci -Am adda
9 $ hg ci -Am adda
10 adding a
10 adding a
11 $ hg rm .
11 $ hg rm .
12 removing a
12 removing a
13 $ hg ci -Am make-it-empty
13 $ hg ci -Am make-it-empty
14 $ hg revert --all -r 0
14 $ hg revert --all -r 0
15 adding a
15 adding a
16 $ hg ci -Am make-it-full
16 $ hg ci -Am make-it-full
17 #if reporevlogstore
17 #if reporevlogstore
18 $ hg debugrevlog -c
18 $ hg debugrevlog -c
19 format : 1
19 format : 1
20 flags : (none)
20 flags : (none)
21
21
22 revisions : 3
22 revisions : 3
23 merges : 0 ( 0.00%)
23 merges : 0 ( 0.00%)
24 normal : 3 (100.00%)
24 normal : 3 (100.00%)
25 revisions : 3
25 revisions : 3
26 empty : 0 ( 0.00%)
26 empty : 0 ( 0.00%)
27 text : 0 (100.00%)
27 text : 0 (100.00%)
28 delta : 0 (100.00%)
28 delta : 0 (100.00%)
29 snapshot : 3 (100.00%)
29 snapshot : 3 (100.00%)
30 lvl-0 : 3 (100.00%)
30 lvl-0 : 3 (100.00%)
31 deltas : 0 ( 0.00%)
31 deltas : 0 ( 0.00%)
32 revision size : 191
32 revision size : 191
33 snapshot : 191 (100.00%)
33 snapshot : 191 (100.00%)
34 lvl-0 : 191 (100.00%)
34 lvl-0 : 191 (100.00%)
35 deltas : 0 ( 0.00%)
35 deltas : 0 ( 0.00%)
36
36
37 chunks : 3
37 chunks : 3
38 0x75 (u) : 3 (100.00%)
38 0x75 (u) : 3 (100.00%)
39 chunks size : 191
39 chunks size : 191
40 0x75 (u) : 191 (100.00%)
40 0x75 (u) : 191 (100.00%)
41
41
42
42
43 total-stored-content: 188 bytes
43 total-stored-content: 188 bytes
44
44
45 avg chain length : 0
45 avg chain length : 0
46 max chain length : 0
46 max chain length : 0
47 max chain reach : 67
47 max chain reach : 67
48 compression ratio : 0
48 compression ratio : 0
49
49
50 uncompressed data size (min/max/avg) : 57 / 66 / 62
50 uncompressed data size (min/max/avg) : 57 / 66 / 62
51 full revision size (min/max/avg) : 58 / 67 / 63
51 full revision size (min/max/avg) : 58 / 67 / 63
52 inter-snapshot size (min/max/avg) : 0 / 0 / 0
52 inter-snapshot size (min/max/avg) : 0 / 0 / 0
53 delta size (min/max/avg) : 0 / 0 / 0
53 delta size (min/max/avg) : 0 / 0 / 0
54 $ hg debugrevlog -m
54 $ hg debugrevlog -m
55 format : 1
55 format : 1
56 flags : inline, generaldelta
56 flags : inline, generaldelta
57
57
58 revisions : 3
58 revisions : 3
59 merges : 0 ( 0.00%)
59 merges : 0 ( 0.00%)
60 normal : 3 (100.00%)
60 normal : 3 (100.00%)
61 revisions : 3
61 revisions : 3
62 empty : 1 (33.33%)
62 empty : 1 (33.33%)
63 text : 1 (100.00%)
63 text : 1 (100.00%)
64 delta : 0 ( 0.00%)
64 delta : 0 ( 0.00%)
65 snapshot : 2 (66.67%)
65 snapshot : 2 (66.67%)
66 lvl-0 : 2 (66.67%)
66 lvl-0 : 2 (66.67%)
67 deltas : 0 ( 0.00%)
67 deltas : 0 ( 0.00%)
68 revision size : 88
68 revision size : 88
69 snapshot : 88 (100.00%)
69 snapshot : 88 (100.00%)
70 lvl-0 : 88 (100.00%)
70 lvl-0 : 88 (100.00%)
71 deltas : 0 ( 0.00%)
71 deltas : 0 ( 0.00%)
72
72
73 chunks : 3
73 chunks : 3
74 empty : 1 (33.33%)
74 empty : 1 (33.33%)
75 0x75 (u) : 2 (66.67%)
75 0x75 (u) : 2 (66.67%)
76 chunks size : 88
76 chunks size : 88
77 empty : 0 ( 0.00%)
77 empty : 0 ( 0.00%)
78 0x75 (u) : 88 (100.00%)
78 0x75 (u) : 88 (100.00%)
79
79
80
80
81 total-stored-content: 86 bytes
81 total-stored-content: 86 bytes
82
82
83 avg chain length : 0
83 avg chain length : 0
84 max chain length : 0
84 max chain length : 0
85 max chain reach : 44
85 max chain reach : 44
86 compression ratio : 0
86 compression ratio : 0
87
87
88 uncompressed data size (min/max/avg) : 0 / 43 / 28
88 uncompressed data size (min/max/avg) : 0 / 43 / 28
89 full revision size (min/max/avg) : 44 / 44 / 44
89 full revision size (min/max/avg) : 44 / 44 / 44
90 inter-snapshot size (min/max/avg) : 0 / 0 / 0
90 inter-snapshot size (min/max/avg) : 0 / 0 / 0
91 delta size (min/max/avg) : 0 / 0 / 0
91 delta size (min/max/avg) : 0 / 0 / 0
92 $ hg debugrevlog a
92 $ hg debugrevlog a
93 format : 1
93 format : 1
94 flags : inline, generaldelta
94 flags : inline, generaldelta
95
95
96 revisions : 1
96 revisions : 1
97 merges : 0 ( 0.00%)
97 merges : 0 ( 0.00%)
98 normal : 1 (100.00%)
98 normal : 1 (100.00%)
99 revisions : 1
99 revisions : 1
100 empty : 0 ( 0.00%)
100 empty : 0 ( 0.00%)
101 text : 0 (100.00%)
101 text : 0 (100.00%)
102 delta : 0 (100.00%)
102 delta : 0 (100.00%)
103 snapshot : 1 (100.00%)
103 snapshot : 1 (100.00%)
104 lvl-0 : 1 (100.00%)
104 lvl-0 : 1 (100.00%)
105 deltas : 0 ( 0.00%)
105 deltas : 0 ( 0.00%)
106 revision size : 3
106 revision size : 3
107 snapshot : 3 (100.00%)
107 snapshot : 3 (100.00%)
108 lvl-0 : 3 (100.00%)
108 lvl-0 : 3 (100.00%)
109 deltas : 0 ( 0.00%)
109 deltas : 0 ( 0.00%)
110
110
111 chunks : 1
111 chunks : 1
112 0x75 (u) : 1 (100.00%)
112 0x75 (u) : 1 (100.00%)
113 chunks size : 3
113 chunks size : 3
114 0x75 (u) : 3 (100.00%)
114 0x75 (u) : 3 (100.00%)
115
115
116
116
117 total-stored-content: 2 bytes
117 total-stored-content: 2 bytes
118
118
119 avg chain length : 0
119 avg chain length : 0
120 max chain length : 0
120 max chain length : 0
121 max chain reach : 3
121 max chain reach : 3
122 compression ratio : 0
122 compression ratio : 0
123
123
124 uncompressed data size (min/max/avg) : 2 / 2 / 2
124 uncompressed data size (min/max/avg) : 2 / 2 / 2
125 full revision size (min/max/avg) : 3 / 3 / 3
125 full revision size (min/max/avg) : 3 / 3 / 3
126 inter-snapshot size (min/max/avg) : 0 / 0 / 0
126 inter-snapshot size (min/max/avg) : 0 / 0 / 0
127 delta size (min/max/avg) : 0 / 0 / 0
127 delta size (min/max/avg) : 0 / 0 / 0
128 #endif
128 #endif
129
129
130 Test debugindex, with and without the --verbose/--debug flag
130 Test debugindex, with and without the --verbose/--debug flag
131 $ hg debugrevlogindex a
131 $ hg debugrevlogindex a
132 rev linkrev nodeid p1 p2
132 rev linkrev nodeid p1 p2
133 0 0 b789fdd96dc2 000000000000 000000000000
133 0 0 b789fdd96dc2 000000000000 000000000000
134
134
135 #if no-reposimplestore
135 #if no-reposimplestore
136 $ hg --verbose debugrevlogindex a
136 $ hg --verbose debugrevlogindex a
137 rev offset length linkrev nodeid p1 p2
137 rev offset length linkrev nodeid p1 p2
138 0 0 3 0 b789fdd96dc2 000000000000 000000000000
138 0 0 3 0 b789fdd96dc2 000000000000 000000000000
139
139
140 $ hg --debug debugrevlogindex a
140 $ hg --debug debugrevlogindex a
141 rev offset length linkrev nodeid p1 p2
141 rev offset length linkrev nodeid p1 p2
142 0 0 3 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
142 0 0 3 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
143 #endif
143 #endif
144
144
145 $ hg debugrevlogindex -f 1 a
145 $ hg debugrevlogindex -f 1 a
146 rev flag size link p1 p2 nodeid
146 rev flag size link p1 p2 nodeid
147 0 0000 2 0 -1 -1 b789fdd96dc2
147 0 0000 2 0 -1 -1 b789fdd96dc2
148
148
149 #if no-reposimplestore
149 #if no-reposimplestore
150 $ hg --verbose debugrevlogindex -f 1 a
150 $ hg --verbose debugrevlogindex -f 1 a
151 rev flag offset length size link p1 p2 nodeid
151 rev flag offset length size link p1 p2 nodeid
152 0 0000 0 3 2 0 -1 -1 b789fdd96dc2
152 0 0000 0 3 2 0 -1 -1 b789fdd96dc2
153
153
154 $ hg --debug debugrevlogindex -f 1 a
154 $ hg --debug debugrevlogindex -f 1 a
155 rev flag offset length size link p1 p2 nodeid
155 rev flag offset length size link p1 p2 nodeid
156 0 0000 0 3 2 0 -1 -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
156 0 0000 0 3 2 0 -1 -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
157 #endif
157 #endif
158
158
159 $ hg debugindex -c
159 $ hg debugindex -c
160 rev linkrev nodeid p1-nodeid p2-nodeid
160 rev linkrev nodeid p1-nodeid p2-nodeid
161 0 0 07f494440405 000000000000 000000000000
161 0 0 07f494440405 000000000000 000000000000
162 1 1 8cccb4b5fec2 07f494440405 000000000000
162 1 1 8cccb4b5fec2 07f494440405 000000000000
163 2 2 b1e228c512c5 8cccb4b5fec2 000000000000
163 2 2 b1e228c512c5 8cccb4b5fec2 000000000000
164 $ hg debugindex -c --debug
164 $ hg debugindex -c --debug
165 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
165 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
166 0 -1 0 07f4944404050f47db2e5c5071e0e84e7a27bba9 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 57 0 0 2 0 58 inline 0 0
166 0 -1 0 07f4944404050f47db2e5c5071e0e84e7a27bba9 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 57 0 0 2 0 58 inline 0 0
167 1 -1 1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 0 07f4944404050f47db2e5c5071e0e84e7a27bba9 -1 0000000000000000000000000000000000000000 66 1 0 2 58 67 inline 0 0
167 1 -1 1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 0 07f4944404050f47db2e5c5071e0e84e7a27bba9 -1 0000000000000000000000000000000000000000 66 1 0 2 58 67 inline 0 0
168 2 -1 2 b1e228c512c5d7066d70562ed839c3323a62d6d2 1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a -1 0000000000000000000000000000000000000000 65 2 0 2 125 66 inline 0 0
168 2 -1 2 b1e228c512c5d7066d70562ed839c3323a62d6d2 1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a -1 0000000000000000000000000000000000000000 65 2 0 2 125 66 inline 0 0
169 $ hg debugindex -m
169 $ hg debugindex -m
170 rev linkrev nodeid p1-nodeid p2-nodeid
170 rev linkrev nodeid p1-nodeid p2-nodeid
171 0 0 a0c8bcbbb45c 000000000000 000000000000
171 0 0 a0c8bcbbb45c 000000000000 000000000000
172 1 1 57faf8a737ae a0c8bcbbb45c 000000000000
172 1 1 57faf8a737ae a0c8bcbbb45c 000000000000
173 2 2 a35b10320954 57faf8a737ae 000000000000
173 2 2 a35b10320954 57faf8a737ae 000000000000
174 $ hg debugindex -m --debug
174 $ hg debugindex -m --debug
175 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
175 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
176 0 -1 0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 43 0 0 2 0 44 inline 0 0
176 0 -1 0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 43 0 0 2 0 44 inline 0 0
177 1 -1 1 57faf8a737ae7faf490582941a82319ba6529dca 0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 -1 0000000000000000000000000000000000000000 0 1 0 2 44 0 inline 0 0
177 1 -1 1 57faf8a737ae7faf490582941a82319ba6529dca 0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 -1 0000000000000000000000000000000000000000 0 1 0 2 44 0 inline 0 0
178 2 -1 2 a35b103209548032201c16c7688cb2657f037a38 1 57faf8a737ae7faf490582941a82319ba6529dca -1 0000000000000000000000000000000000000000 43 2 0 2 44 44 inline 0 0
178 2 -1 2 a35b103209548032201c16c7688cb2657f037a38 1 57faf8a737ae7faf490582941a82319ba6529dca -1 0000000000000000000000000000000000000000 43 2 0 2 44 44 inline 0 0
179 $ hg debugindex a
179 $ hg debugindex a
180 rev linkrev nodeid p1-nodeid p2-nodeid
180 rev linkrev nodeid p1-nodeid p2-nodeid
181 0 0 b789fdd96dc2 000000000000 000000000000
181 0 0 b789fdd96dc2 000000000000 000000000000
182 $ hg debugindex --debug a
182 $ hg debugindex --debug a
183 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
183 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
184 0 -1 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 2 0 0 2 0 3 inline 0 0
184 0 -1 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 2 0 0 2 0 3 inline 0 0
185
185
186 debugdelta chain basic output
186 debugdelta chain basic output
187
187
188 #if reporevlogstore pure
188 #if reporevlogstore pure rust
189 $ hg debugindexstats
189 $ hg debugindexstats
190 abort: debugindexstats only works with native code
190 abort: debugindexstats only works with native C code
191 [255]
191 [255]
192 #endif
192 #endif
193 #if reporevlogstore no-pure
193 #if reporevlogstore no-pure no-rust
194 $ hg debugindexstats
194 $ hg debugindexstats
195 node trie capacity: 4
195 node trie capacity: 4
196 node trie count: 2
196 node trie count: 2
197 node trie depth: 1
197 node trie depth: 1
198 node trie last rev scanned: -1 (no-rust !)
198 node trie last rev scanned: -1 (no-rust !)
199 node trie last rev scanned: 3 (rust !)
199 node trie last rev scanned: 3 (rust !)
200 node trie lookups: 4 (no-rust !)
200 node trie lookups: 4 (no-rust !)
201 node trie lookups: 2 (rust !)
201 node trie lookups: 2 (rust !)
202 node trie misses: 1
202 node trie misses: 1
203 node trie splits: 1
203 node trie splits: 1
204 revs in memory: 3
204 revs in memory: 3
205 #endif
205 #endif
206
206
207 #if reporevlogstore no-pure
207 #if reporevlogstore no-pure
208 $ hg debugdeltachain -m --all-info
208 $ hg debugdeltachain -m --all-info
209 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
209 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
210 0 -1 -1 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
210 0 -1 -1 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
211 1 0 -1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
211 1 0 -1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
212 2 1 -1 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
212 2 1 -1 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
213
213
214 $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
214 $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
215 0 1 1
215 0 1 1
216 1 2 1
216 1 2 1
217 2 3 1
217 2 3 1
218
218
219 $ hg debugdeltachain -m -Tjson --size-info
219 $ hg debugdeltachain -m -Tjson --size-info
220 [
220 [
221 {
221 {
222 "chainid": 1,
222 "chainid": 1,
223 "chainlen": 1,
223 "chainlen": 1,
224 "chainratio": 1.0232558139534884,
224 "chainratio": 1.0232558139534884,
225 "chainsize": 44,
225 "chainsize": 44,
226 "compsize": 44,
226 "compsize": 44,
227 "deltatype": "base",
227 "deltatype": "base",
228 "p1": -1,
228 "p1": -1,
229 "p2": -1,
229 "p2": -1,
230 "prevrev": -1,
230 "prevrev": -1,
231 "rev": 0,
231 "rev": 0,
232 "uncompsize": 43
232 "uncompsize": 43
233 },
233 },
234 {
234 {
235 "chainid": 2,
235 "chainid": 2,
236 "chainlen": 1,
236 "chainlen": 1,
237 "chainratio": 0,
237 "chainratio": 0,
238 "chainsize": 0,
238 "chainsize": 0,
239 "compsize": 0,
239 "compsize": 0,
240 "deltatype": "base",
240 "deltatype": "base",
241 "p1": 0,
241 "p1": 0,
242 "p2": -1,
242 "p2": -1,
243 "prevrev": -1,
243 "prevrev": -1,
244 "rev": 1,
244 "rev": 1,
245 "uncompsize": 0
245 "uncompsize": 0
246 },
246 },
247 {
247 {
248 "chainid": 3,
248 "chainid": 3,
249 "chainlen": 1,
249 "chainlen": 1,
250 "chainratio": 1.0232558139534884,
250 "chainratio": 1.0232558139534884,
251 "chainsize": 44,
251 "chainsize": 44,
252 "compsize": 44,
252 "compsize": 44,
253 "deltatype": "base",
253 "deltatype": "base",
254 "p1": 1,
254 "p1": 1,
255 "p2": -1,
255 "p2": -1,
256 "prevrev": -1,
256 "prevrev": -1,
257 "rev": 2,
257 "rev": 2,
258 "uncompsize": 43
258 "uncompsize": 43
259 }
259 }
260 ]
260 ]
261
261
262 $ hg debugdeltachain -m -Tjson --all-info
262 $ hg debugdeltachain -m -Tjson --all-info
263 [
263 [
264 {
264 {
265 "chainid": 1,
265 "chainid": 1,
266 "chainlen": 1,
266 "chainlen": 1,
267 "chainratio": 1.0232558139534884,
267 "chainratio": 1.0232558139534884,
268 "chainsize": 44,
268 "chainsize": 44,
269 "compsize": 44,
269 "compsize": 44,
270 "deltatype": "base",
270 "deltatype": "base",
271 "extradist": 0,
271 "extradist": 0,
272 "extraratio": 0.0,
272 "extraratio": 0.0,
273 "largestblock": 44,
273 "largestblock": 44,
274 "lindist": 44,
274 "lindist": 44,
275 "p1": -1,
275 "p1": -1,
276 "p2": -1,
276 "p2": -1,
277 "prevrev": -1,
277 "prevrev": -1,
278 "readdensity": 1.0,
278 "readdensity": 1.0,
279 "readsize": 44,
279 "readsize": 44,
280 "rev": 0,
280 "rev": 0,
281 "srchunks": 1,
281 "srchunks": 1,
282 "uncompsize": 43
282 "uncompsize": 43
283 },
283 },
284 {
284 {
285 "chainid": 2,
285 "chainid": 2,
286 "chainlen": 1,
286 "chainlen": 1,
287 "chainratio": 0,
287 "chainratio": 0,
288 "chainsize": 0,
288 "chainsize": 0,
289 "compsize": 0,
289 "compsize": 0,
290 "deltatype": "base",
290 "deltatype": "base",
291 "extradist": 0,
291 "extradist": 0,
292 "extraratio": 0,
292 "extraratio": 0,
293 "largestblock": 0,
293 "largestblock": 0,
294 "lindist": 0,
294 "lindist": 0,
295 "p1": 0,
295 "p1": 0,
296 "p2": -1,
296 "p2": -1,
297 "prevrev": -1,
297 "prevrev": -1,
298 "readdensity": 1,
298 "readdensity": 1,
299 "readsize": 0,
299 "readsize": 0,
300 "rev": 1,
300 "rev": 1,
301 "srchunks": 1,
301 "srchunks": 1,
302 "uncompsize": 0
302 "uncompsize": 0
303 },
303 },
304 {
304 {
305 "chainid": 3,
305 "chainid": 3,
306 "chainlen": 1,
306 "chainlen": 1,
307 "chainratio": 1.0232558139534884,
307 "chainratio": 1.0232558139534884,
308 "chainsize": 44,
308 "chainsize": 44,
309 "compsize": 44,
309 "compsize": 44,
310 "deltatype": "base",
310 "deltatype": "base",
311 "extradist": 0,
311 "extradist": 0,
312 "extraratio": 0.0,
312 "extraratio": 0.0,
313 "largestblock": 44,
313 "largestblock": 44,
314 "lindist": 44,
314 "lindist": 44,
315 "p1": 1,
315 "p1": 1,
316 "p2": -1,
316 "p2": -1,
317 "prevrev": -1,
317 "prevrev": -1,
318 "readdensity": 1.0,
318 "readdensity": 1.0,
319 "readsize": 44,
319 "readsize": 44,
320 "rev": 2,
320 "rev": 2,
321 "srchunks": 1,
321 "srchunks": 1,
322 "uncompsize": 43
322 "uncompsize": 43
323 }
323 }
324 ]
324 ]
325
325
326 debugdelta chain with sparse read enabled
326 debugdelta chain with sparse read enabled
327
327
328 $ cat >> $HGRCPATH <<EOF
328 $ cat >> $HGRCPATH <<EOF
329 > [experimental]
329 > [experimental]
330 > sparse-read = True
330 > sparse-read = True
331 > EOF
331 > EOF
332 $ hg debugdeltachain -m --all-info
332 $ hg debugdeltachain -m --all-info
333 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
333 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
334 0 -1 -1 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
334 0 -1 -1 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
335 1 0 -1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
335 1 0 -1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
336 2 1 -1 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
336 2 1 -1 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
337
337
338 $ hg debugdeltachain -m --sparse-info -T '{rev} {chainid} {chainlen} {readsize} {largestblock} {readdensity}\n'
338 $ hg debugdeltachain -m --sparse-info -T '{rev} {chainid} {chainlen} {readsize} {largestblock} {readdensity}\n'
339 0 1 1 44 44 1.0
339 0 1 1 44 44 1.0
340 1 2 1 0 0 1
340 1 2 1 0 0 1
341 2 3 1 44 44 1.0
341 2 3 1 44 44 1.0
342
342
343 $ hg debugdeltachain -m -Tjson --sparse-info
343 $ hg debugdeltachain -m -Tjson --sparse-info
344 [
344 [
345 {
345 {
346 "chainid": 1,
346 "chainid": 1,
347 "chainlen": 1,
347 "chainlen": 1,
348 "deltatype": "base",
348 "deltatype": "base",
349 "largestblock": 44,
349 "largestblock": 44,
350 "p1": -1,
350 "p1": -1,
351 "p2": -1,
351 "p2": -1,
352 "prevrev": -1,
352 "prevrev": -1,
353 "readdensity": 1.0,
353 "readdensity": 1.0,
354 "readsize": 44,
354 "readsize": 44,
355 "rev": 0,
355 "rev": 0,
356 "srchunks": 1
356 "srchunks": 1
357 },
357 },
358 {
358 {
359 "chainid": 2,
359 "chainid": 2,
360 "chainlen": 1,
360 "chainlen": 1,
361 "deltatype": "base",
361 "deltatype": "base",
362 "largestblock": 0,
362 "largestblock": 0,
363 "p1": 0,
363 "p1": 0,
364 "p2": -1,
364 "p2": -1,
365 "prevrev": -1,
365 "prevrev": -1,
366 "readdensity": 1,
366 "readdensity": 1,
367 "readsize": 0,
367 "readsize": 0,
368 "rev": 1,
368 "rev": 1,
369 "srchunks": 1
369 "srchunks": 1
370 },
370 },
371 {
371 {
372 "chainid": 3,
372 "chainid": 3,
373 "chainlen": 1,
373 "chainlen": 1,
374 "deltatype": "base",
374 "deltatype": "base",
375 "largestblock": 44,
375 "largestblock": 44,
376 "p1": 1,
376 "p1": 1,
377 "p2": -1,
377 "p2": -1,
378 "prevrev": -1,
378 "prevrev": -1,
379 "readdensity": 1.0,
379 "readdensity": 1.0,
380 "readsize": 44,
380 "readsize": 44,
381 "rev": 2,
381 "rev": 2,
382 "srchunks": 1
382 "srchunks": 1
383 }
383 }
384 ]
384 ]
385
385
386 $ hg debugdeltachain -m -Tjson --all-info
386 $ hg debugdeltachain -m -Tjson --all-info
387 [
387 [
388 {
388 {
389 "chainid": 1,
389 "chainid": 1,
390 "chainlen": 1,
390 "chainlen": 1,
391 "chainratio": 1.0232558139534884,
391 "chainratio": 1.0232558139534884,
392 "chainsize": 44,
392 "chainsize": 44,
393 "compsize": 44,
393 "compsize": 44,
394 "deltatype": "base",
394 "deltatype": "base",
395 "extradist": 0,
395 "extradist": 0,
396 "extraratio": 0.0,
396 "extraratio": 0.0,
397 "largestblock": 44,
397 "largestblock": 44,
398 "lindist": 44,
398 "lindist": 44,
399 "p1": -1,
399 "p1": -1,
400 "p2": -1,
400 "p2": -1,
401 "prevrev": -1,
401 "prevrev": -1,
402 "readdensity": 1.0,
402 "readdensity": 1.0,
403 "readsize": 44,
403 "readsize": 44,
404 "rev": 0,
404 "rev": 0,
405 "srchunks": 1,
405 "srchunks": 1,
406 "uncompsize": 43
406 "uncompsize": 43
407 },
407 },
408 {
408 {
409 "chainid": 2,
409 "chainid": 2,
410 "chainlen": 1,
410 "chainlen": 1,
411 "chainratio": 0,
411 "chainratio": 0,
412 "chainsize": 0,
412 "chainsize": 0,
413 "compsize": 0,
413 "compsize": 0,
414 "deltatype": "base",
414 "deltatype": "base",
415 "extradist": 0,
415 "extradist": 0,
416 "extraratio": 0,
416 "extraratio": 0,
417 "largestblock": 0,
417 "largestblock": 0,
418 "lindist": 0,
418 "lindist": 0,
419 "p1": 0,
419 "p1": 0,
420 "p2": -1,
420 "p2": -1,
421 "prevrev": -1,
421 "prevrev": -1,
422 "readdensity": 1,
422 "readdensity": 1,
423 "readsize": 0,
423 "readsize": 0,
424 "rev": 1,
424 "rev": 1,
425 "srchunks": 1,
425 "srchunks": 1,
426 "uncompsize": 0
426 "uncompsize": 0
427 },
427 },
428 {
428 {
429 "chainid": 3,
429 "chainid": 3,
430 "chainlen": 1,
430 "chainlen": 1,
431 "chainratio": 1.0232558139534884,
431 "chainratio": 1.0232558139534884,
432 "chainsize": 44,
432 "chainsize": 44,
433 "compsize": 44,
433 "compsize": 44,
434 "deltatype": "base",
434 "deltatype": "base",
435 "extradist": 0,
435 "extradist": 0,
436 "extraratio": 0.0,
436 "extraratio": 0.0,
437 "largestblock": 44,
437 "largestblock": 44,
438 "lindist": 44,
438 "lindist": 44,
439 "p1": 1,
439 "p1": 1,
440 "p2": -1,
440 "p2": -1,
441 "prevrev": -1,
441 "prevrev": -1,
442 "readdensity": 1.0,
442 "readdensity": 1.0,
443 "readsize": 44,
443 "readsize": 44,
444 "rev": 2,
444 "rev": 2,
445 "srchunks": 1,
445 "srchunks": 1,
446 "uncompsize": 43
446 "uncompsize": 43
447 }
447 }
448 ]
448 ]
449
449
450 $ printf "This test checks things.\n" >> a
450 $ printf "This test checks things.\n" >> a
451 $ hg ci -m a
451 $ hg ci -m a
452 $ hg branch other
452 $ hg branch other
453 marked working directory as branch other
453 marked working directory as branch other
454 (branches are permanent and global, did you want a bookmark?)
454 (branches are permanent and global, did you want a bookmark?)
455 $ for i in `$TESTDIR/seq.py 5`; do
455 $ for i in `$TESTDIR/seq.py 5`; do
456 > printf "shorter ${i}" >> a
456 > printf "shorter ${i}" >> a
457 > hg ci -m "a other:$i"
457 > hg ci -m "a other:$i"
458 > hg up -q default
458 > hg up -q default
459 > printf "for the branch default we want longer chains: ${i}" >> a
459 > printf "for the branch default we want longer chains: ${i}" >> a
460 > hg ci -m "a default:$i"
460 > hg ci -m "a default:$i"
461 > hg up -q other
461 > hg up -q other
462 > done
462 > done
463 $ hg debugdeltachain a -T '{rev} {srchunks}\n' --all-info\
463 $ hg debugdeltachain a -T '{rev} {srchunks}\n' --all-info\
464 > --config experimental.sparse-read.density-threshold=0.50 \
464 > --config experimental.sparse-read.density-threshold=0.50 \
465 > --config experimental.sparse-read.min-gap-size=0
465 > --config experimental.sparse-read.min-gap-size=0
466 0 1
466 0 1
467 1 1
467 1 1
468 2 1
468 2 1
469 3 1
469 3 1
470 4 1
470 4 1
471 5 1
471 5 1
472 6 1
472 6 1
473 7 1
473 7 1
474 8 1
474 8 1
475 9 1
475 9 1
476 10 2 (no-zstd !)
476 10 2 (no-zstd !)
477 10 1 (zstd !)
477 10 1 (zstd !)
478 11 1
478 11 1
479 $ hg --config extensions.strip= strip --no-backup -r 1
479 $ hg --config extensions.strip= strip --no-backup -r 1
480 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
480 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
481
481
482 Test max chain len
482 Test max chain len
483 $ cat >> $HGRCPATH << EOF
483 $ cat >> $HGRCPATH << EOF
484 > [format]
484 > [format]
485 > maxchainlen=4
485 > maxchainlen=4
486 > EOF
486 > EOF
487
487
488 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
488 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
489 $ hg ci -m a
489 $ hg ci -m a
490 $ printf "b\n" >> a
490 $ printf "b\n" >> a
491 $ hg ci -m a
491 $ hg ci -m a
492 $ printf "c\n" >> a
492 $ printf "c\n" >> a
493 $ hg ci -m a
493 $ hg ci -m a
494 $ printf "d\n" >> a
494 $ printf "d\n" >> a
495 $ hg ci -m a
495 $ hg ci -m a
496 $ printf "e\n" >> a
496 $ printf "e\n" >> a
497 $ hg ci -m a
497 $ hg ci -m a
498 $ printf "f\n" >> a
498 $ printf "f\n" >> a
499 $ hg ci -m a
499 $ hg ci -m a
500 $ printf 'g\n' >> a
500 $ printf 'g\n' >> a
501 $ hg ci -m a
501 $ hg ci -m a
502 $ printf 'h\n' >> a
502 $ printf 'h\n' >> a
503 $ hg ci -m a
503 $ hg ci -m a
504
504
505 $ hg debugrevlog -d a
505 $ hg debugrevlog -d a
506 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
506 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
507 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
507 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
508 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
508 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
509 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
509 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
510 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
510 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
511 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
511 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
512 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
512 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
513 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
513 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
514 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
514 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
515 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
515 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
516 #endif
516 #endif
517
517
518 Test debuglocks command:
518 Test debuglocks command:
519
519
520 $ hg debuglocks
520 $ hg debuglocks
521 lock: free
521 lock: free
522 wlock: free
522 wlock: free
523
523
524 * Test setting the lock
524 * Test setting the lock
525
525
526 waitlock <file> will wait for file to be created. If it isn't in a reasonable
526 waitlock <file> will wait for file to be created. If it isn't in a reasonable
527 amount of time, displays error message and returns 1
527 amount of time, displays error message and returns 1
528 $ waitlock() {
528 $ waitlock() {
529 > start=`date +%s`
529 > start=`date +%s`
530 > timeout=5
530 > timeout=5
531 > while [ \( ! -f $1 \) -a \( ! -L $1 \) ]; do
531 > while [ \( ! -f $1 \) -a \( ! -L $1 \) ]; do
532 > now=`date +%s`
532 > now=`date +%s`
533 > if [ "`expr $now - $start`" -gt $timeout ]; then
533 > if [ "`expr $now - $start`" -gt $timeout ]; then
534 > echo "timeout: $1 was not created in $timeout seconds"
534 > echo "timeout: $1 was not created in $timeout seconds"
535 > return 1
535 > return 1
536 > fi
536 > fi
537 > sleep 0.1
537 > sleep 0.1
538 > done
538 > done
539 > }
539 > }
540 $ dolock() {
540 $ dolock() {
541 > {
541 > {
542 > waitlock .hg/unlock
542 > waitlock .hg/unlock
543 > rm -f .hg/unlock
543 > rm -f .hg/unlock
544 > echo y
544 > echo y
545 > } | hg debuglocks "$@" > /dev/null
545 > } | hg debuglocks "$@" > /dev/null
546 > }
546 > }
547 $ dolock -s &
547 $ dolock -s &
548 $ waitlock .hg/store/lock
548 $ waitlock .hg/store/lock
549
549
550 $ hg debuglocks
550 $ hg debuglocks
551 lock: user *, process * (*s) (glob)
551 lock: user *, process * (*s) (glob)
552 wlock: free
552 wlock: free
553 [1]
553 [1]
554 $ touch .hg/unlock
554 $ touch .hg/unlock
555 $ wait
555 $ wait
556 $ [ -f .hg/store/lock ] || echo "There is no lock"
556 $ [ -f .hg/store/lock ] || echo "There is no lock"
557 There is no lock
557 There is no lock
558
558
559 * Test setting the wlock
559 * Test setting the wlock
560
560
561 $ dolock -S &
561 $ dolock -S &
562 $ waitlock .hg/wlock
562 $ waitlock .hg/wlock
563
563
564 $ hg debuglocks
564 $ hg debuglocks
565 lock: free
565 lock: free
566 wlock: user *, process * (*s) (glob)
566 wlock: user *, process * (*s) (glob)
567 [1]
567 [1]
568 $ touch .hg/unlock
568 $ touch .hg/unlock
569 $ wait
569 $ wait
570 $ [ -f .hg/wlock ] || echo "There is no wlock"
570 $ [ -f .hg/wlock ] || echo "There is no wlock"
571 There is no wlock
571 There is no wlock
572
572
573 * Test setting both locks
573 * Test setting both locks
574
574
575 $ dolock -Ss &
575 $ dolock -Ss &
576 $ waitlock .hg/wlock && waitlock .hg/store/lock
576 $ waitlock .hg/wlock && waitlock .hg/store/lock
577
577
578 $ hg debuglocks
578 $ hg debuglocks
579 lock: user *, process * (*s) (glob)
579 lock: user *, process * (*s) (glob)
580 wlock: user *, process * (*s) (glob)
580 wlock: user *, process * (*s) (glob)
581 [2]
581 [2]
582
582
583 * Test failing to set a lock
583 * Test failing to set a lock
584
584
585 $ hg debuglocks -s
585 $ hg debuglocks -s
586 abort: lock is already held
586 abort: lock is already held
587 [255]
587 [255]
588
588
589 $ hg debuglocks -S
589 $ hg debuglocks -S
590 abort: wlock is already held
590 abort: wlock is already held
591 [255]
591 [255]
592
592
593 $ touch .hg/unlock
593 $ touch .hg/unlock
594 $ wait
594 $ wait
595
595
596 $ hg debuglocks
596 $ hg debuglocks
597 lock: free
597 lock: free
598 wlock: free
598 wlock: free
599
599
600 * Test forcing the lock
600 * Test forcing the lock
601
601
602 $ dolock -s &
602 $ dolock -s &
603 $ waitlock .hg/store/lock
603 $ waitlock .hg/store/lock
604
604
605 $ hg debuglocks
605 $ hg debuglocks
606 lock: user *, process * (*s) (glob)
606 lock: user *, process * (*s) (glob)
607 wlock: free
607 wlock: free
608 [1]
608 [1]
609
609
610 $ hg debuglocks -L
610 $ hg debuglocks -L
611
611
612 $ hg debuglocks
612 $ hg debuglocks
613 lock: free
613 lock: free
614 wlock: free
614 wlock: free
615
615
616 $ touch .hg/unlock
616 $ touch .hg/unlock
617 $ wait
617 $ wait
618
618
619 * Test forcing the wlock
619 * Test forcing the wlock
620
620
621 $ dolock -S &
621 $ dolock -S &
622 $ waitlock .hg/wlock
622 $ waitlock .hg/wlock
623
623
624 $ hg debuglocks
624 $ hg debuglocks
625 lock: free
625 lock: free
626 wlock: user *, process * (*s) (glob)
626 wlock: user *, process * (*s) (glob)
627 [1]
627 [1]
628
628
629 $ hg debuglocks -W
629 $ hg debuglocks -W
630
630
631 $ hg debuglocks
631 $ hg debuglocks
632 lock: free
632 lock: free
633 wlock: free
633 wlock: free
634
634
635 $ touch .hg/unlock
635 $ touch .hg/unlock
636 $ wait
636 $ wait
637
637
638 Test WdirUnsupported exception
638 Test WdirUnsupported exception
639
639
640 $ hg debugdata -c ffffffffffffffffffffffffffffffffffffffff
640 $ hg debugdata -c ffffffffffffffffffffffffffffffffffffffff
641 abort: working directory revision cannot be specified
641 abort: working directory revision cannot be specified
642 [255]
642 [255]
643
643
644 Test cache warming command
644 Test cache warming command
645
645
646 $ rm -rf .hg/cache/
646 $ rm -rf .hg/cache/
647 $ hg debugupdatecaches --debug
647 $ hg debugupdatecaches --debug
648 updating the branch cache
648 updating the branch cache
649 $ ls -r .hg/cache/*
649 $ ls -r .hg/cache/*
650 .hg/cache/tags2-served
650 .hg/cache/tags2-served
651 .hg/cache/tags2
651 .hg/cache/tags2
652 .hg/cache/rbc-revs-v1
652 .hg/cache/rbc-revs-v1
653 .hg/cache/rbc-names-v1
653 .hg/cache/rbc-names-v1
654 .hg/cache/hgtagsfnodes1
654 .hg/cache/hgtagsfnodes1
655 .hg/cache/branch2-visible-hidden
655 .hg/cache/branch2-visible-hidden
656 .hg/cache/branch2-visible
656 .hg/cache/branch2-visible
657 .hg/cache/branch2-served.hidden
657 .hg/cache/branch2-served.hidden
658 .hg/cache/branch2-served
658 .hg/cache/branch2-served
659 .hg/cache/branch2-immutable
659 .hg/cache/branch2-immutable
660 .hg/cache/branch2-base
660 .hg/cache/branch2-base
661
661
662 Test debugcolor
662 Test debugcolor
663
663
664 #if no-windows
664 #if no-windows
665 $ hg debugcolor --style --color always | grep -E 'mode|style|log\.'
665 $ hg debugcolor --style --color always | grep -E 'mode|style|log\.'
666 color mode: 'ansi'
666 color mode: 'ansi'
667 available style:
667 available style:
668 \x1b[0;33mlog.changeset\x1b[0m: \x1b[0;33myellow\x1b[0m (esc)
668 \x1b[0;33mlog.changeset\x1b[0m: \x1b[0;33myellow\x1b[0m (esc)
669 #endif
669 #endif
670
670
671 $ hg debugcolor --style --color never
671 $ hg debugcolor --style --color never
672 color mode: None
672 color mode: None
673 available style:
673 available style:
674
674
675 $ cd ..
675 $ cd ..
676
676
677 Test internal debugstacktrace command
677 Test internal debugstacktrace command
678
678
679 $ cat > debugstacktrace.py << EOF
679 $ cat > debugstacktrace.py << EOF
680 > from mercurial import (
680 > from mercurial import (
681 > util,
681 > util,
682 > )
682 > )
683 > from mercurial.utils import (
683 > from mercurial.utils import (
684 > procutil,
684 > procutil,
685 > )
685 > )
686 > def f():
686 > def f():
687 > util.debugstacktrace(f=procutil.stdout)
687 > util.debugstacktrace(f=procutil.stdout)
688 > g()
688 > g()
689 > def g():
689 > def g():
690 > util.dst(b'hello from g\\n', skip=1)
690 > util.dst(b'hello from g\\n', skip=1)
691 > h()
691 > h()
692 > def h():
692 > def h():
693 > util.dst(b'hi ...\\nfrom h hidden in g', 1, depth=2)
693 > util.dst(b'hi ...\\nfrom h hidden in g', 1, depth=2)
694 > f()
694 > f()
695 > EOF
695 > EOF
696 $ "$PYTHON" debugstacktrace.py
696 $ "$PYTHON" debugstacktrace.py
697 stacktrace at:
697 stacktrace at:
698 *debugstacktrace.py:15 in * (glob)
698 *debugstacktrace.py:15 in * (glob)
699 *debugstacktrace.py:8 in f (glob)
699 *debugstacktrace.py:8 in f (glob)
700 hello from g at:
700 hello from g at:
701 *debugstacktrace.py:15 in * (glob)
701 *debugstacktrace.py:15 in * (glob)
702 *debugstacktrace.py:9 in f (glob)
702 *debugstacktrace.py:9 in f (glob)
703 hi ...
703 hi ...
704 from h hidden in g at:
704 from h hidden in g at:
705 *debugstacktrace.py:9 in f (glob)
705 *debugstacktrace.py:9 in f (glob)
706 *debugstacktrace.py:12 in g (glob)
706 *debugstacktrace.py:12 in g (glob)
707
707
708 Test debugcapabilities command:
708 Test debugcapabilities command:
709
709
710 $ hg debugcapabilities ./debugrevlog/
710 $ hg debugcapabilities ./debugrevlog/
711 Main capabilities:
711 Main capabilities:
712 branchmap
712 branchmap
713 $USUAL_BUNDLE2_CAPS$
713 $USUAL_BUNDLE2_CAPS$
714 getbundle
714 getbundle
715 known
715 known
716 lookup
716 lookup
717 pushkey
717 pushkey
718 unbundle
718 unbundle
719 Bundle2 capabilities:
719 Bundle2 capabilities:
720 HG20
720 HG20
721 bookmarks
721 bookmarks
722 changegroup
722 changegroup
723 01
723 01
724 02
724 02
725 03
725 03
726 checkheads
726 checkheads
727 related
727 related
728 digests
728 digests
729 md5
729 md5
730 sha1
730 sha1
731 sha512
731 sha512
732 error
732 error
733 abort
733 abort
734 unsupportedcontent
734 unsupportedcontent
735 pushraced
735 pushraced
736 pushkey
736 pushkey
737 hgtagsfnodes
737 hgtagsfnodes
738 listkeys
738 listkeys
739 phases
739 phases
740 heads
740 heads
741 pushkey
741 pushkey
742 remote-changegroup
742 remote-changegroup
743 http
743 http
744 https
744 https
745 stream
745 stream
746 v2
746 v2
747
747
748 Test debugpeer
748 Test debugpeer
749
749
750 $ hg debugpeer ssh://user@dummy/debugrevlog
750 $ hg debugpeer ssh://user@dummy/debugrevlog
751 url: ssh://user@dummy/debugrevlog
751 url: ssh://user@dummy/debugrevlog
752 local: no
752 local: no
753 pushable: yes
753 pushable: yes
754
754
755 #if rust
755 #if rust
756
756
757 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
757 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
758 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
758 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
759 devel-peer-request: hello+between
759 devel-peer-request: hello+between
760 devel-peer-request: pairs: 81 bytes
760 devel-peer-request: pairs: 81 bytes
761 sending hello command
761 sending hello command
762 sending between command
762 sending between command
763 remote: 473
763 remote: 473
764 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
764 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
765 remote: 1
765 remote: 1
766 devel-peer-request: protocaps
766 devel-peer-request: protocaps
767 devel-peer-request: caps: * bytes (glob)
767 devel-peer-request: caps: * bytes (glob)
768 sending protocaps command
768 sending protocaps command
769 url: ssh://user@dummy/debugrevlog
769 url: ssh://user@dummy/debugrevlog
770 local: no
770 local: no
771 pushable: yes
771 pushable: yes
772
772
773 #endif
773 #endif
774
774
775 #if no-rust zstd
775 #if no-rust zstd
776
776
777 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
777 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
778 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
778 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
779 devel-peer-request: hello+between
779 devel-peer-request: hello+between
780 devel-peer-request: pairs: 81 bytes
780 devel-peer-request: pairs: 81 bytes
781 sending hello command
781 sending hello command
782 sending between command
782 sending between command
783 remote: 473
783 remote: 473
784 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
784 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
785 remote: 1
785 remote: 1
786 devel-peer-request: protocaps
786 devel-peer-request: protocaps
787 devel-peer-request: caps: * bytes (glob)
787 devel-peer-request: caps: * bytes (glob)
788 sending protocaps command
788 sending protocaps command
789 url: ssh://user@dummy/debugrevlog
789 url: ssh://user@dummy/debugrevlog
790 local: no
790 local: no
791 pushable: yes
791 pushable: yes
792
792
793 #endif
793 #endif
794
794
795 #if no-rust no-zstd
795 #if no-rust no-zstd
796
796
797 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
797 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
798 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
798 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
799 devel-peer-request: hello+between
799 devel-peer-request: hello+between
800 devel-peer-request: pairs: 81 bytes
800 devel-peer-request: pairs: 81 bytes
801 sending hello command
801 sending hello command
802 sending between command
802 sending between command
803 remote: 449
803 remote: 449
804 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
804 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
805 remote: 1
805 remote: 1
806 devel-peer-request: protocaps
806 devel-peer-request: protocaps
807 devel-peer-request: caps: * bytes (glob)
807 devel-peer-request: caps: * bytes (glob)
808 sending protocaps command
808 sending protocaps command
809 url: ssh://user@dummy/debugrevlog
809 url: ssh://user@dummy/debugrevlog
810 local: no
810 local: no
811 pushable: yes
811 pushable: yes
812
812
813 #endif
813 #endif
814
814
815 Test debugshell
815 Test debugshell
816
816
817 $ hg debugshell -c 'ui.write(b"%s\n" % ui.username())'
817 $ hg debugshell -c 'ui.write(b"%s\n" % ui.username())'
818 test
818 test
General Comments 0
You need to be logged in to leave comments. Login now